qeth_core_main.c 187.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
F
Frank Blaschka 已提交
2
/*
3
 *    Copyright IBM Corp. 2007, 2009
F
Frank Blaschka 已提交
4 5 6 7 8 9
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

10 11 12
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

13
#include <linux/compat.h>
F
Frank Blaschka 已提交
14 15 16 17 18
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
19
#include <linux/log2.h>
20
#include <linux/io.h>
F
Frank Blaschka 已提交
21 22 23
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
J
Julian Wiedmann 已提交
24
#include <linux/mm.h>
F
Frank Blaschka 已提交
25
#include <linux/kthread.h>
26
#include <linux/slab.h>
27 28 29
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
30
#include <linux/rcutree.h>
31
#include <linux/skbuff.h>
32
#include <linux/vmalloc.h>
33

34
#include <net/iucv/af_iucv.h>
35
#include <net/dsfield.h>
F
Frank Blaschka 已提交
36

37
#include <asm/ebcdic.h>
38
#include <asm/chpid.h>
39
#include <asm/sysinfo.h>
40 41 42
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
43
#include <asm/cpcmd.h>
F
Frank Blaschka 已提交
44 45 46

#include "qeth_core.h"

47 48 49 50 51
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
52 53
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
54 55 56 57
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
F
Frank Blaschka 已提交
58

59 60
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
61
static struct kmem_cache *qeth_qdio_outbuf_cache;
F
Frank Blaschka 已提交
62 63

static struct device *qeth_core_root_dev;
64
static struct dentry *qeth_debugfs_root;
F
Frank Blaschka 已提交
65 66
static struct lock_class_key qdio_out_skb_queue_key;

67
static void qeth_issue_next_read_cb(struct qeth_card *card,
68 69
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
F
Frank Blaschka 已提交
70
static int qeth_qdio_establish(struct qeth_card *);
71
static void qeth_free_qdio_queues(struct qeth_card *card);
72 73 74
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
75 76
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
77
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
F
Frank Blaschka 已提交
78

S
Stefan Raspl 已提交
79 80 81 82 83 84 85 86 87
static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

J
Julian Wiedmann 已提交
88
static const char *qeth_get_cardname(struct qeth_card *card)
F
Frank Blaschka 已提交
89
{
90
	if (IS_VM_NIC(card)) {
F
Frank Blaschka 已提交
91
		switch (card->info.type) {
92
		case QETH_CARD_TYPE_OSD:
93
			return " Virtual NIC QDIO";
F
Frank Blaschka 已提交
94
		case QETH_CARD_TYPE_IQD:
95
			return " Virtual NIC Hiper";
96
		case QETH_CARD_TYPE_OSM:
97
			return " Virtual NIC QDIO - OSM";
98
		case QETH_CARD_TYPE_OSX:
99
			return " Virtual NIC QDIO - OSX";
F
Frank Blaschka 已提交
100 101 102 103 104
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
105
		case QETH_CARD_TYPE_OSD:
F
Frank Blaschka 已提交
106 107 108 109 110
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
111 112 113 114
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
F
Frank Blaschka 已提交
115 116 117 118 119 120 121 122 123 124
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
125
	if (IS_VM_NIC(card)) {
F
Frank Blaschka 已提交
126
		switch (card->info.type) {
127
		case QETH_CARD_TYPE_OSD:
128
			return "Virt.NIC QDIO";
F
Frank Blaschka 已提交
129
		case QETH_CARD_TYPE_IQD:
130
			return "Virt.NIC Hiper";
131
		case QETH_CARD_TYPE_OSM:
132
			return "Virt.NIC OSM";
133
		case QETH_CARD_TYPE_OSX:
134
			return "Virt.NIC OSX";
F
Frank Blaschka 已提交
135 136 137 138 139
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
140
		case QETH_CARD_TYPE_OSD:
F
Frank Blaschka 已提交
141 142 143 144 145 146 147 148 149
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
150 151
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
F
Frank Blaschka 已提交
152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
167 168 169 170
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
F
Frank Blaschka 已提交
171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

204
static void qeth_clear_working_pool_list(struct qeth_card *card)
F
Frank Blaschka 已提交
205 206
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
207 208
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;
F
Frank Blaschka 已提交
209

C
Carsten Otte 已提交
210
	QETH_CARD_TEXT(card, 5, "clwrklst");
F
Frank Blaschka 已提交
211
	list_for_each_entry_safe(pool_entry, tmp,
212 213
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);
214 215 216

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
F
Frank Blaschka 已提交
217 218
}

219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
252
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
253 254 255 256 257 258 259 260 261 262

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

F
Frank Blaschka 已提交
263 264
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
265 266
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;
F
Frank Blaschka 已提交
267

C
Carsten Otte 已提交
268
	QETH_CARD_TEXT(card, 5, "alocpool");
F
Frank Blaschka 已提交
269
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
270 271 272 273
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
F
Frank Blaschka 已提交
274 275 276
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
277

278
		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
F
Frank Blaschka 已提交
279 280 281 282
	}
	return 0;
}

283
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
F
Frank Blaschka 已提交
284
{
285 286 287 288 289 290
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

C
Carsten Otte 已提交
291
	QETH_CARD_TEXT(card, 2, "realcbp");
F
Frank Blaschka 已提交
292

293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
F
Frank Blaschka 已提交
332
}
333
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
F
Frank Blaschka 已提交
334

S
Sebastian Ott 已提交
335 336
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
337 338 339 340
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
S
Sebastian Ott 已提交
341 342 343 344 345 346 347 348 349 350 351
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

352 353 354 355 356
	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

S
Sebastian Ott 已提交
357
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
358
		q->bufs[i].buffer = q->qdio_bufs[i];
S
Sebastian Ott 已提交
359 360 361 362 363

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

J
Julian Wiedmann 已提交
364
static int qeth_cq_init(struct qeth_card *card)
365 366 367 368
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
369
		QETH_CARD_TEXT(card, 2, "cqinit");
370 371
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
372 373 374 375 376
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
377
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
378 379 380 381 382 383 384 385
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

J
Julian Wiedmann 已提交
386
static int qeth_alloc_cq(struct qeth_card *card)
387 388 389 390 391 392 393
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

394
		QETH_CARD_TEXT(card, 2, "cqon");
S
Sebastian Ott 已提交
395
		card->qdio.c_q = qeth_alloc_qdio_queue();
396 397 398 399 400
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
401
		card->qdio.out_bufstates =
K
Kees Cook 已提交
402 403 404 405
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
406 407 408 409 410 411 412 413 414 415
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
416
		QETH_CARD_TEXT(card, 2, "nocq");
417 418 419
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
420
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
421 422 423 424
	rc = 0;
out:
	return rc;
free_cq_out:
S
Sebastian Ott 已提交
425
	qeth_free_qdio_queue(card->qdio.c_q);
426 427 428 429 430 431
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

J
Julian Wiedmann 已提交
432
static void qeth_free_cq(struct qeth_card *card)
433 434 435
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
S
Sebastian Ott 已提交
436
		qeth_free_qdio_queue(card->qdio.c_q);
437 438 439 440 441 442
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

J
Julian Wiedmann 已提交
443 444 445
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

J
Julian Wiedmann 已提交
468 469
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
470
{
471 472 473
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

474 475 476 477 478 479 480 481 482
	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
483

484 485
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
486 487 488
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
489
				qeth_tx_complete_buf(c, forced_cleanup, 0);
490

491
				c = f->next_pending;
S
Stefan Raspl 已提交
492
				WARN_ON_ONCE(head->next_pending != f);
493 494 495 496 497 498 499 500 501
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}

		}
	}
502 503 504 505 506 507
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
508 509
}

J
Julian Wiedmann 已提交
510 511 512
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
513 514
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
515
	enum iucv_tx_notify notification;
516
	unsigned int i;
517 518 519 520 521 522 523

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

524 525 526 527
	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
S
Stefan Raspl 已提交
528 529
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
530 531 532 533 534 535 536 537 538 539
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0)  {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

540 541 542 543 544 545
	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
546 547 548 549
		void *data = phys_to_virt(aob->sba[i]);

		if (data && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
550 551
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
552

553 554 555
	qdio_release_aob(aob);
}

556 557
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
558 559
{
	ccw->cmd_code = cmd_code;
560
	ccw->flags = flags | CCW_FLAG_SLI;
561 562 563 564
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

565
static int __qeth_issue_next_read(struct qeth_card *card)
F
Frank Blaschka 已提交
566
{
567 568 569
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
570
	int rc;
F
Frank Blaschka 已提交
571

C
Carsten Otte 已提交
572
	QETH_CARD_TEXT(card, 5, "issnxrd");
573
	if (channel->state != CH_STATE_UP)
F
Frank Blaschka 已提交
574
		return -EIO;
575

576 577
	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
578
	iob->callback = qeth_issue_next_read_cb;
579 580 581
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

C
Carsten Otte 已提交
582
	QETH_CARD_TEXT(card, 6, "noirqpnd");
583
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
584 585 586
	if (!rc) {
		channel->active_cmd = iob;
	} else {
587 588
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
589
		qeth_unlock_channel(card, channel);
590
		qeth_put_cmd(iob);
591
		card->read_or_write_problem = 1;
F
Frank Blaschka 已提交
592 593 594 595 596
		qeth_schedule_recovery(card);
	}
	return rc;
}

597 598 599 600 601 602 603 604 605 606 607
static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

608 609
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
610 611
{
	spin_lock_irq(&card->lock);
612
	list_add_tail(&iob->list, &card->cmd_waiter_list);
613 614 615
	spin_unlock_irq(&card->lock);
}

616 617
static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
618 619
{
	spin_lock_irq(&card->lock);
620
	list_del(&iob->list);
621 622 623
	spin_unlock_irq(&card->lock);
}

624
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
625
{
626 627
	iob->rc = reason;
	complete(&iob->done);
628
}
629
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
630

631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

659
static void qeth_flush_local_addrs(struct qeth_card *card)
660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

881
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
F
Frank Blaschka 已提交
882 883
		struct qeth_card *card)
{
884
	const char *ipa_name;
885
	int com = cmd->hdr.command;
886

F
Frank Blaschka 已提交
887
	ipa_name = qeth_get_ipa_cmd_name(com);
888

889
	if (rc)
890 891 892
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
893
	else
894 895
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
F
Frank Blaschka 已提交
896 897 898
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
899
						struct qeth_ipa_cmd *cmd)
F
Frank Blaschka 已提交
900
{
C
Carsten Otte 已提交
901
	QETH_CARD_TEXT(card, 5, "chkipad");
902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
J
Julian Wiedmann 已提交
918
				netdev_name(card->dev));
919
			schedule_work(&card->close_dev_work);
F
Frank Blaschka 已提交
920
		} else {
921 922
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
J
Julian Wiedmann 已提交
923
				 netdev_name(card->dev), card->info.chpid);
924
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
925
			netif_carrier_off(card->dev);
F
Frank Blaschka 已提交
926
		}
927 928 929 930
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
J
Julian Wiedmann 已提交
931
			 netdev_name(card->dev), card->info.chpid);
932 933 934 935 936 937 938 939 940 941 942 943 944
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
945 946 947 948 949
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

950 951 952
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
953 954 955 956 957
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

958 959 960 961 962
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
F
Frank Blaschka 已提交
963 964 965
	}
}

966
static void qeth_clear_ipacmd_list(struct qeth_card *card)
F
Frank Blaschka 已提交
967
{
968
	struct qeth_cmd_buffer *iob;
F
Frank Blaschka 已提交
969 970
	unsigned long flags;

C
Carsten Otte 已提交
971
	QETH_CARD_TEXT(card, 4, "clipalst");
F
Frank Blaschka 已提交
972 973

	spin_lock_irqsave(&card->lock, flags);
974
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
975
		qeth_notify_cmd(iob, -ECANCELED);
F
Frank Blaschka 已提交
976 977 978
	spin_unlock_irqrestore(&card->lock, flags);
}

979 980
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
F
Frank Blaschka 已提交
981
{
982
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
983
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
984
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
985
				 buffer[4]);
C
Carsten Otte 已提交
986 987
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
988 989 990
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
991
			dev_err(&card->gdev->dev,
992 993
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
994
		}
F
Frank Blaschka 已提交
995 996 997 998 999
		return -EIO;
	}
	return 0;
}

1000
void qeth_put_cmd(struct qeth_cmd_buffer *iob)
1001 1002 1003 1004 1005 1006
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
1007
EXPORT_SYMBOL_GPL(qeth_put_cmd);
F
Frank Blaschka 已提交
1008

1009
static void qeth_release_buffer_cb(struct qeth_card *card,
1010 1011
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
1012
{
1013
	qeth_put_cmd(iob);
1014 1015
}

1016 1017
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
1018
	qeth_notify_cmd(iob, rc);
1019
	qeth_put_cmd(iob);
F
Frank Blaschka 已提交
1020 1021
}

1022 1023 1024
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

1042 1043 1044
	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
1045
	refcount_set(&iob->ref_count, 1);
1046 1047 1048 1049 1050
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
1051
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
1052

1053
static void qeth_issue_next_read_cb(struct qeth_card *card,
1054 1055
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
F
Frank Blaschka 已提交
1056
{
1057
	struct qeth_cmd_buffer *request = NULL;
1058
	struct qeth_ipa_cmd *cmd = NULL;
1059
	struct qeth_reply *reply = NULL;
1060
	struct qeth_cmd_buffer *tmp;
F
Frank Blaschka 已提交
1061
	unsigned long flags;
1062
	int rc = 0;
F
Frank Blaschka 已提交
1063

C
Carsten Otte 已提交
1064
	QETH_CARD_TEXT(card, 4, "sndctlcb");
1065 1066 1067 1068 1069 1070
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
1071
		fallthrough;
1072
	default:
1073
		qeth_clear_ipacmd_list(card);
1074
		goto err_idx;
F
Frank Blaschka 已提交
1075 1076
	}

1077 1078
	cmd = __ipa_reply(iob);
	if (cmd) {
1079
		cmd = qeth_check_ipa_data(card, cmd);
1080 1081 1082 1083 1084 1085 1086
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
F
Frank Blaschka 已提交
1087 1088
	}

1089
	/* match against pending cmd requests */
F
Frank Blaschka 已提交
1090
	spin_lock_irqsave(&card->lock, flags);
1091
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
1092
		if (tmp->match && tmp->match(tmp, iob)) {
1093
			request = tmp;
1094
			/* take the object outside the lock */
1095
			qeth_get_cmd(request);
1096
			break;
F
Frank Blaschka 已提交
1097 1098 1099
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
1100

1101
	if (!request)
1102 1103
		goto out;

1104
	reply = &request->reply;
1105 1106
	if (!reply->callback) {
		rc = 0;
1107 1108 1109
		goto no_callback;
	}

1110 1111
	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
1112
		/* Bail out when the requestor has already left: */
1113
		rc = request->rc;
1114 1115 1116
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
1117
	spin_unlock_irqrestore(&request->lock, flags);
1118

1119
no_callback:
1120
	if (rc <= 0)
1121 1122
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
F
Frank Blaschka 已提交
1123 1124 1125 1126
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
1127
	__qeth_issue_next_read(card);
1128 1129
err_idx:
	qeth_put_cmd(iob);
F
Frank Blaschka 已提交
1130 1131 1132 1133 1134 1135
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
1136
	int rc = 0;
F
Frank Blaschka 已提交
1137 1138

	spin_lock_irqsave(&card->thread_mask_lock, flags);
1139 1140 1141 1142 1143 1144
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
F
Frank Blaschka 已提交
1145
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1146 1147

	return rc;
F
Frank Blaschka 已提交
1148 1149
}

1150 1151
static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
F
Frank Blaschka 已提交
1152 1153 1154 1155 1156 1157 1158 1159 1160
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

1161 1162
static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
F
Frank Blaschka 已提交
1163 1164 1165 1166 1167 1168
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1169
	wake_up_all(&card->wait_q);
F
Frank Blaschka 已提交
1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

1191
static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
F
Frank Blaschka 已提交
1192 1193 1194 1195 1196 1197 1198 1199
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

1200
int qeth_schedule_recovery(struct qeth_card *card)
F
Frank Blaschka 已提交
1201
{
1202 1203
	int rc;

C
Carsten Otte 已提交
1204
	QETH_CARD_TEXT(card, 2, "startrec");
1205 1206 1207

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
F
Frank Blaschka 已提交
1208
		schedule_work(&card->kernel_thread_starter);
1209 1210

	return rc;
F
Frank Blaschka 已提交
1211 1212
}

1213 1214
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
F
Frank Blaschka 已提交
1215 1216 1217 1218 1219
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
1220 1221
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
F
Frank Blaschka 已提交
1222 1223 1224 1225

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
C
Carsten Otte 已提交
1226
		QETH_CARD_TEXT(card, 2, "CGENCHK");
1227 1228
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
1229 1230
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
F
Frank Blaschka 已提交
1231 1232
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
1233
		return -EIO;
F
Frank Blaschka 已提交
1234 1235 1236 1237 1238
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
C
Carsten Otte 已提交
1239
			QETH_CARD_TEXT(card, 2, "REVIND");
1240
			return -EIO;
F
Frank Blaschka 已提交
1241 1242 1243
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
C
Carsten Otte 已提交
1244
			QETH_CARD_TEXT(card, 2, "CMDREJi");
1245
			return -EIO;
F
Frank Blaschka 已提交
1246 1247
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
C
Carsten Otte 已提交
1248
			QETH_CARD_TEXT(card, 2, "AFFE");
1249
			return -EIO;
F
Frank Blaschka 已提交
1250 1251
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
C
Carsten Otte 已提交
1252
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
F
Frank Blaschka 已提交
1253 1254
			return 0;
		}
C
Carsten Otte 已提交
1255
		QETH_CARD_TEXT(card, 2, "DGENCHK");
1256
		return -EIO;
F
Frank Blaschka 已提交
1257 1258 1259 1260
	}
	return 0;
}

1261
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1262
				struct irb *irb)
F
Frank Blaschka 已提交
1263
{
1264
	if (!IS_ERR(irb))
F
Frank Blaschka 已提交
1265 1266 1267 1268
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
1269 1270
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
C
Carsten Otte 已提交
1271 1272
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1273
		return -EIO;
F
Frank Blaschka 已提交
1274
	case -ETIMEDOUT:
1275 1276
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
C
Carsten Otte 已提交
1277 1278
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1279
		return -ETIMEDOUT;
F
Frank Blaschka 已提交
1280
	default:
1281 1282
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
C
Carsten Otte 已提交
1283 1284
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
1285
		return PTR_ERR(irb);
F
Frank Blaschka 已提交
1286 1287 1288 1289 1290 1291 1292 1293
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
1294
	struct qeth_cmd_buffer *iob = NULL;
1295
	struct ccwgroup_device *gdev;
F
Frank Blaschka 已提交
1296 1297 1298
	struct qeth_channel *channel;
	struct qeth_card *card;

1299 1300 1301
	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
F
Frank Blaschka 已提交
1302

C
Carsten Otte 已提交
1303 1304
	QETH_CARD_TEXT(card, 5, "irq");

F
Frank Blaschka 已提交
1305 1306
	if (card->read.ccwdev == cdev) {
		channel = &card->read;
C
Carsten Otte 已提交
1307
		QETH_CARD_TEXT(card, 5, "read");
F
Frank Blaschka 已提交
1308 1309
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
C
Carsten Otte 已提交
1310
		QETH_CARD_TEXT(card, 5, "write");
F
Frank Blaschka 已提交
1311 1312
	} else {
		channel = &card->data;
C
Carsten Otte 已提交
1313
		QETH_CARD_TEXT(card, 5, "data");
F
Frank Blaschka 已提交
1314
	}
1315

1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330
	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
1331
	qeth_unlock_channel(card, channel);
1332

1333
	rc = qeth_check_irb_error(card, cdev, irb);
1334
	if (rc) {
1335 1336
		/* IO was terminated, free its resources. */
		if (iob)
1337
			qeth_cancel_cmd(iob, rc);
1338 1339 1340
		return;
	}

1341
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
F
Frank Blaschka 已提交
1342
		channel->state = CH_STATE_STOPPED;
1343 1344
		wake_up(&card->wait_q);
	}
F
Frank Blaschka 已提交
1345

1346
	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
F
Frank Blaschka 已提交
1347
		channel->state = CH_STATE_HALTED;
1348 1349
		wake_up(&card->wait_q);
	}
F
Frank Blaschka 已提交
1350

1351 1352 1353 1354
	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
F
Frank Blaschka 已提交
1355
	}
1356 1357 1358 1359

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

F
Frank Blaschka 已提交
1360 1361 1362 1363
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
1364 1365 1366
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
1367 1368 1369
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
F
Frank Blaschka 已提交
1370 1371 1372 1373 1374
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
1375

1376
		rc = qeth_get_problem(card, cdev, irb);
F
Frank Blaschka 已提交
1377
		if (rc) {
1378
			card->read_or_write_problem = 1;
1379
			if (iob)
1380
				qeth_cancel_cmd(iob, rc);
1381
			qeth_clear_ipacmd_list(card);
F
Frank Blaschka 已提交
1382
			qeth_schedule_recovery(card);
1383
			return;
F
Frank Blaschka 已提交
1384 1385 1386
		}
	}

1387 1388 1389 1390
	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
1391
			return;
1392 1393 1394 1395 1396
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
F
Frank Blaschka 已提交
1397 1398
}

1399
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1400
		struct qeth_qdio_out_buffer *buf,
1401
		enum iucv_tx_notify notification)
F
Frank Blaschka 已提交
1402 1403 1404
{
	struct sk_buff *skb;

1405
	skb_queue_walk(&buf->skb_list, skb) {
1406 1407
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1408 1409
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1410 1411 1412
	}
}

1413 1414
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
1415
{
1416
	struct qeth_qdio_out_q *queue = buf->q;
1417 1418
	struct sk_buff *skb;

1419
	/* release may never happen from within CQ tasklet scope */
S
Stefan Raspl 已提交
1420
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
F
Frank Blaschka 已提交
1421

1422
	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1423 1424 1425 1426 1427 1428 1429 1430
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1431 1432 1433 1434 1435 1436 1437
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

1438 1439 1440 1441 1442 1443
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1444
		if (!error) {
1445 1446 1447 1448 1449 1450 1451 1452 1453
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}
1454

1455
		napi_consume_skb(skb, budget);
1456
	}
1457 1458 1459
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1460
				     struct qeth_qdio_out_buffer *buf,
1461
				     bool error, int budget)
1462 1463 1464 1465 1466 1467 1468
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

1469
	qeth_tx_complete_buf(buf, error, budget);
1470

1471
	for (i = 0; i < queue->max_elements; ++i) {
1472 1473 1474 1475
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
1476
		buf->is_header[i] = 0;
F
Frank Blaschka 已提交
1477
	}
1478

1479
	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
F
Frank Blaschka 已提交
1480
	buf->next_element_to_fill = 0;
1481
	buf->frames = 0;
1482
	buf->bytes = 0;
1483
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1484 1485
}

1486
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1487 1488 1489 1490 1491 1492
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
1493
		qeth_cleanup_handled_pending(q, j, 1);
1494
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1495 1496 1497 1498 1499
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
F
Frank Blaschka 已提交
1500 1501
}

1502
static void qeth_drain_output_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
1503
{
1504
	int i;
F
Frank Blaschka 已提交
1505

C
Carsten Otte 已提交
1506
	QETH_CARD_TEXT(card, 2, "clearqdbf");
F
Frank Blaschka 已提交
1507
	/* clear outbound buffers to free skbs */
1508
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
1509 1510
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
1511
	}
F
Frank Blaschka 已提交
1512 1513
}

1514
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1515
{
1516
	unsigned int max = single ? 1 : card->dev->num_tx_queues;
1517

1518
	if (card->qdio.no_out_queues == max)
1519
		return;
1520

1521
	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1522
		qeth_free_qdio_queues(card);
1523

1524
	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1525 1526
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

1527
	card->qdio.no_out_queues = max;
1528 1529
}

1530
static int qeth_update_from_chp_desc(struct qeth_card *card)
F
Frank Blaschka 已提交
1531 1532
{
	struct ccw_device *ccwdev;
1533
	struct channel_path_desc_fmt0 *chp_dsc;
F
Frank Blaschka 已提交
1534

1535
	QETH_CARD_TEXT(card, 2, "chp_desc");
F
Frank Blaschka 已提交
1536 1537

	ccwdev = card->data.ccwdev;
1538 1539
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
1540
		return -ENOMEM;
1541 1542 1543

	card->info.func_level = 0x4100 + chp_dsc->desc;

1544 1545
	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
1546
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1547

1548
	kfree(chp_dsc);
1549 1550
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1551
	return 0;
F
Frank Blaschka 已提交
1552 1553 1554 1555
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
1556
	QETH_CARD_TEXT(card, 4, "intqdinf");
F
Frank Blaschka 已提交
1557
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1558 1559 1560
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

F
Frank Blaschka 已提交
1561
	/* inbound */
1562
	card->qdio.no_in_queues = 1;
F
Frank Blaschka 已提交
1563
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1564
	if (IS_IQD(card))
1565 1566 1567
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
F
Frank Blaschka 已提交
1568 1569 1570 1571 1572
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

1573
static void qeth_set_initial_options(struct qeth_card *card)
F
Frank Blaschka 已提交
1574 1575 1576
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
E
Einar Lueck 已提交
1577
	card->options.isolation = ISOLATION_MODE_NONE;
1578
	card->options.cq = QETH_CQ_DISABLED;
1579
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
F
Frank Blaschka 已提交
1580 1581 1582 1583 1584 1585 1586 1587
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
C
Carsten Otte 已提交
1588
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
F
Frank Blaschka 已提交
1589 1590 1591 1592 1593 1594 1595 1596
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

1597
static int qeth_do_reset(void *data);
F
Frank Blaschka 已提交
1598 1599
static void qeth_start_kernel_thread(struct work_struct *work)
{
1600
	struct task_struct *ts;
F
Frank Blaschka 已提交
1601 1602
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
1603
	QETH_CARD_TEXT(card, 2, "strthrd");
F
Frank Blaschka 已提交
1604 1605 1606 1607

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
1608
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1609
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1610 1611 1612 1613 1614 1615
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
F
Frank Blaschka 已提交
1616 1617
}

1618
static void qeth_buffer_reclaim_work(struct work_struct *);
1619
static void qeth_setup_card(struct qeth_card *card)
F
Frank Blaschka 已提交
1620
{
1621
	QETH_CARD_TEXT(card, 2, "setupcrd");
F
Frank Blaschka 已提交
1622

1623
	card->info.type = CARD_RDEV(card)->id.driver_info;
F
Frank Blaschka 已提交
1624 1625 1626
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
1627
	mutex_init(&card->conf_mutex);
1628
	mutex_init(&card->discipline_mutex);
F
Frank Blaschka 已提交
1629 1630 1631
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
1632
	qeth_set_initial_options(card);
F
Frank Blaschka 已提交
1633 1634 1635
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
1636
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
S
Stefan Raspl 已提交
1637
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1638
	hash_init(card->rx_mode_addrs);
1639 1640 1641 1642
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
F
Frank Blaschka 已提交
1643 1644
}

1645 1646 1647 1648
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
1649 1650 1651
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
1652 1653
}

1654
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
F
Frank Blaschka 已提交
1655 1656 1657
{
	struct qeth_card *card;

1658
	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1659
	card = kzalloc(sizeof(*card), GFP_KERNEL);
F
Frank Blaschka 已提交
1660
	if (!card)
1661
		goto out;
1662
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1663 1664

	card->gdev = gdev;
1665
	dev_set_drvdata(&gdev->dev, card);
1666 1667 1668
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];
1669

1670 1671
	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
1672 1673
	if (!card->event_wq)
		goto out_wq;
1674 1675 1676 1677

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;
1678

1679 1680 1681 1682 1683
	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

1684 1685
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
F
Frank Blaschka 已提交
1686
	return card;
1687

1688
out_read_cmd:
1689 1690
	destroy_workqueue(card->event_wq);
out_wq:
1691
	dev_set_drvdata(&gdev->dev, NULL);
1692 1693 1694
	kfree(card);
out:
	return NULL;
F
Frank Blaschka 已提交
1695 1696
}

1697 1698
static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
F
Frank Blaschka 已提交
1699 1700 1701
{
	int rc;

C
Carsten Otte 已提交
1702
	QETH_CARD_TEXT(card, 3, "clearch");
J
Julian Wiedmann 已提交
1703
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1704
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
J
Julian Wiedmann 已提交
1705
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

1719 1720
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
F
Frank Blaschka 已提交
1721 1722 1723
{
	int rc;

C
Carsten Otte 已提交
1724
	QETH_CARD_TEXT(card, 3, "haltch");
J
Julian Wiedmann 已提交
1725
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1726
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
J
Julian Wiedmann 已提交
1727
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

1740
static int qeth_stop_channel(struct qeth_channel *channel)
1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

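/* Ask the z/VM hypervisor (via DIAG 0x26C) whether this VNIC is backed by a
 * layer-2 or layer-3 network, so that the matching discipline can be
 * enforced for the device.
 */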
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

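/* Set the blocking-threshold (blkt) defaults for the card; devices that
 * report the v1 blkt format get all thresholds cleared to zero.
 */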
static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

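/* Reset the IDX sequence numbers, the handshake tokens and the function
 * level that is advertised during IDX activation for this card type.
 */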
static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

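/* Layout of the node descriptors returned by Read Configuration Data (RCD);
 * qeth_read_conf_data_cb() extracts the CHPID, unit address and model
 * information from them.
 */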
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

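/* Prepare the common parts of an IDX ACTIVATE command: the WRITE/READ CCW
 * pair plus port number, issuer token, function level and device address.
 */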
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

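/* Apply the max MTU reported by the device: update dev->max_mtu, and for
 * IQD move a device still at the old maximum to the new one and resize the
 * RX buffers (which requires rebuilding the QDIO queues). Other card types
 * keep their MTU, or get an Ethernet default on first setup.
 */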
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	if (IS_OSN(card))
		return QETH_PROT_OSN2;
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}
	return q;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

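/* Allocate the inbound queue, the inbound buffer pool, all outbound queues
 * (with their qdio buffers and completion timers) and the completion queue.
 */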
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

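/* Fill the QIB parameter area with the PCI thresholds, the blocking
 * thresholds and, unless prio-queueing is active or only one queue exists,
 * the per-queue priority table.
 */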
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

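/* Run the MPC bring-up sequence: start the read channel, CM enable/setup,
 * ULP enable/setup, then allocate and establish the QDIO queues and
 * activate the data connection.
 */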
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

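/* Pick a buffer pool entry whose pages are no longer referenced elsewhere;
 * if none is free, recycle the first entry and replace its busy pages.
 */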
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

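/* Determine how many TX buffers may be bulk-submitted per SIGA; only IQD
 * unicast queues without a completion queue use the adapter-reported
 * mmwc value, everything else submits one buffer at a time.
 */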
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

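/* Wrap an IPA command into its MPC transport PDU: set up the WRITE CCW and
 * copy the PDU header, lengths, protocol type and ULP connection token.
 */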
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
			  u16 cmd_length,
			  bool (*match)(struct qeth_cmd_buffer *iob,
					struct qeth_cmd_buffer *reply))
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;
	iob->match = match;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

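/* Arm, disarm or trigger a hardware trace trap via the SET DIAGNOSTIC ASSIST
 * command; when arming, the trap id identifying this device is included in
 * the request.
 */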
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
			       buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
			       buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

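/* Give up to @count empty RX buffers back to the device. Returns the number
 * of buffers actually re-queued, or 0 if the requeue threshold has not been
 * reached (or no buffer could be refilled).
 */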
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packets */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
		       (u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switched to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	struct qeth_card *card = queue->card;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (!IS_IQD(card)) {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}

		if (atomic_read(&queue->set_pci_flags_count))
			qdio_flags |= QDIO_FLAG_PCI_OUT;
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);

	/* Fake the TX completion interrupt: */
	if (IS_IQD(card)) {
		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}
	}

	if (rc) {
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we need to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

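/* Completion Queue handler: each used CQ buffer element carries the address
 * of an asynchronous completion block (AOB); process them and hand the CQ
 * buffers back to the device.
 */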
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_aob_addr);
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		    card->qdio.c_q->next_buf_to_init,
		    count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct net_device *dev = card->dev;
	struct netdev_queue *txq;
	int i;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < (first_element + count); ++i) {
		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];

		qeth_handle_send_error(card, buf, qdio_error);
		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
	}

	atomic_sub(count, &queue->used_buffers);
	qeth_check_outbound_queue(queue);

	txq = netdev_get_tx_queue(dev, __queue);
	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In which case the code below won't trigger. So before returning,
	 * xmit will re-check the txq's fill level and wake it up if needed.
	 */
	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
		netif_tx_wake_queue(txq);
}

/**
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (qeth_get_ip_version(skb)) {
		case 4:
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case 6:
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

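/* IQD TX bulking: a further skb may only be added to the current buffer if it
 * has the same target (destination MAC resp. next hop) and the same VLAN as
 * the previously queued skb.
 */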
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			buf->is_header[element] = 1;
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

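/* TX path for non-IQD devices: fill the skb into the current output buffer
 * according to the queue's packing state, and flush any buffers that became
 * PRIMED in the process.
 */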
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
							   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

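/* Common transmit helper: build the HW header (pushed into the skb headroom
 * where the layout allows it, otherwise taken from the header cache) and hand
 * the skb to the IQD bulking path or the packing path.
 */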
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, int ipv,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  int ipv, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, ipv, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}

int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

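/* Forward an SNMP request from userspace to the adapter, collecting the
 * (possibly multi-part) reply in a kernel buffer that is copied back to the
 * caller.
 */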
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}

static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

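/* Run a QUERY_OAT adapter command on behalf of userspace: reply fragments are
 * gathered in a vmalloc'ed buffer and then copied to the user-supplied buffer
 * described by @udata.
 */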
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}

static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply->param;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

	return 0;
}

int qeth_query_card_info(struct qeth_card *card,
			 struct qeth_link_info *link_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
}

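/* Initialize the cached link characteristics (speed, duplex, port type) from
 * the card and link type reported by the device.
 */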
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}
	}
}

/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
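	/* Completion Queueing needs an IQD queue format plus the format-2 CQ
	 * facility bits:
	 */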
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field	 = (void *)qib_parms;
	init_data.no_input_qs            = card->qdio.no_in_queues;
	init_data.no_output_qs           = card->qdio.no_out_queues;
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
	init_data.irq_poll		 = qeth_qdio_poll;
	init_data.int_parm               = (unsigned long) card;
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	init_data.scan_threshold	 = IS_IQD(card) ? 0 : 32;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
out:
	kfree(qib_parms);
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
#ifdef CONFIG_QETH_OSN
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
#endif
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
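	/* Clear the device and restart the channels; each failing step below
	 * shares a retry budget of three additional attempts before giving up.
	 */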
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}
	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}

static int qeth_set_online(struct qeth_card *card)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = card->discipline->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

int qeth_set_offline(struct qeth_card *card, bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	card->discipline->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
	struct qeth_card *card = data;
	int rc;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, true);
	rc = qeth_set_online(card);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr, bool uses_frags)
{
	struct napi_struct *napi = &card->napi;
	bool is_cso;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_OSN:
		skb_push(skb, sizeof(*hdr));
		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
		QETH_CARD_STAT_INC(card, rx_packets);

		card->osn_info.data_cb(skb);
		return;
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}

static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

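/* Assemble the next packet from a QDIO input buffer into an skb, advancing
 * *element_no and *__offset past the data that was consumed.
 */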
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = hdr->hdr.osn.pdu_length;
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		linear_len = skb_len;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}
use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

	qeth_receive_skb(card, skb, hdr, uses_frags);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

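/* Drain all completed buffers from the Completion Queue (input queue 1). */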
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

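/* Complete one IQD TX buffer; buffers that the device still reports as
 * PENDING are re-initialized here and finish later via the Completion Queue.
 */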
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
						   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED)
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_core_load_discipline(struct qeth_card *card,
		enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_core_free_discipline(struct qeth_card *card)
{
	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
				   ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card))
			dev->features |= NETIF_F_SG;
	}
	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			goto err;
		}
	}

	rc = qeth_set_online(card);
err:
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return qeth_set_offline(card, false);
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
6766
}
6767

6768 6769 6770
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
6771
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6772
}
6773

6774 6775 6776 6777 6778 6779 6780
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6781
					    QETH_PROT_IPV4, NULL);
6782 6783 6784
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

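/* Certain offloads are restricted for traffic with a local next hop. When
 * such an offload gets switched off, drop the cached local addresses for
 * the affected IP version.
 */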
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

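/* .ndo_set_features callback: program the changed offload bits into the
 * card, and revert any bit that could not be applied.
 */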
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

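/* .ndo_fix_features callback: clear all offload bits that the card does not
 * support.
 */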
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

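/* .ndo_features_check callback: restrict offloads that this skb must not
 * use, and force small GSO segments into a single buffer element.
 */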
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

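/* .ndo_get_stats64 callback: RX counters come from the card, TX counters
 * are summed up over all output queues.
 */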
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

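/* Resize the set of active TX queues. On IQD the prio-to-TC mapping must
 * only span the ucast queues, so adjust it first and roll it back if the
 * resize fails.
 */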
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

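/* TX queue selection for IQD devices: mcast traffic gets the dedicated
 * mcast queue, ucast traffic is spread over the remaining queues.
 */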
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

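/* .ndo_open callback: start the TX queues and kick the NAPI instance(s). */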
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

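/* .ndo_stop callback: quiesce NAPI and the TX path. On IQD the per-queue
 * NAPI instances are removed, since the queues may get re-allocated.
 */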
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

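/* Module init: register dbf views, the root device, the slab caches and
 * the CCW/ccwgroup drivers; unwind in reverse order on any failure.
 */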
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");