/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

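/*
 * Take/drop a reference on the iw_cm_id for this endpoint.  The
 * CM_ID_REFED/CM_ID_DEREFED history bits record the transitions for
 * post-mortem debugging of connection teardown.
 */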
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

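/*
 * The endpoint timer pins the ep with a reference; stop_ep_timer() or
 * the timeout handler drops it.  The TIMEOUT flag ensures the
 * reference is only dropped once.
 */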
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

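/*
 * Transmit an skb through the connection's L2T entry.  On a fatal
 * device error the skb is freed and -EIO returned; a NET_XMIT_DROP
 * from the lower layer is reported as -ENOMEM.
 */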
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
		  struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

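/*
 * Compute the effective MSS from the negotiated MTU index, less
 * IPv4/IPv6 and TCP header overhead (and the timestamp option when
 * present), clamped to a minimum of 128 bytes.
 */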
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

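/*
 * Maintain the hwtid -> ep mapping used by CPL handlers to find an
 * endpoint from a hardware tid.  Updates are made under the device
 * lock so lookups never race with endpoint teardown.
 */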
static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	if (skb)
		t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

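/*
 * Resolve an IPv6 route to the peer and confirm the egress device is
 * one of this adapter's ports (or loopback); otherwise the dst is
 * released and NULL returned.
 */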
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	}
}

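/*
 * Send the FW_FLOWC_WR that primes firmware state for this offloaded
 * connection: channel, queue ids, initial sequence numbers, send
 * window and MSS.  A ninth parameter carries the VLAN priority when
 * the L2T entry is tagged.
 */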
static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

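/*
 * Select the best-aligned entry in the adapter MTU table, allowing
 * for IPv4/IPv6 plus TCP header overhead and, when timestamps are
 * negotiated, the timestamp option.
 */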
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

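/*
 * Build and send the active open request.  The CPL layout varies by
 * chip generation (T4/T5/T6) and address family; T5 and later also
 * carry a randomized initial send sequence number and extra opt2
 * flags.
 */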
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid<<14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
			t5req->opt2 = cpu_to_be32(opt2);
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
			t5req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}

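/*
 * Send the MPA start request as streaming-mode TX data.  For MPA v2
 * the private data is prefixed with the ird/ord (and optional RTR)
 * negotiation parameters.  The skb is held in ep->mpa_skb so the
 * payload survives until the hardware acks it; fw4_ack() drops it.
 */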
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

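/*
 * CPL_ACT_ESTABLISH: the active open completed.  Move the endpoint
 * from its atid to the hardware tid, record the initial sequence
 * numbers, then start MPA negotiation with a FLOWC WR followed by the
 * MPA start request.
 */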
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep, NULL);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

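/*
 * Return RX credits to the hardware with CPL_RX_DATA_ACK so the
 * advertised TCP window reopens as the ULP consumes data.
 */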
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

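/*
 * When set, adjust the local ird/ord to what the peer actually
 * offered (within adapter limits) instead of failing the MPA
 * negotiation with an insufficient-IRD error.
 */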
#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
			(ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * Fail if we have received more data than the MPA header plus
	 * the advertised private data accounts for.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

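	/*
	 * An MPA v2 enhanced-RDMA-connection request carries the peer's
	 * IRD/ORD and, if peer-to-peer mode is requested, the desired RTR
	 * type (RDMA write vs. read) in the parameters that follow the
	 * base MPA header.
	 */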
	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

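	/*
	 * Streaming-mode data is only expected while the MPA exchange is
	 * in progress; data arriving in FPDU_MODE is a protocol error and
	 * moves the QP to TERMINATE below.
	 */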
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		     __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

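/*
 * Retry a failed active open by handing it to the firmware as a
 * FW_OFLD_CONNECTION_WR work request instead of a regular active-open
 * CPL.  This path is taken from act_open_rpl() when the hardware TCAM
 * is full.
 */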
static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice. */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
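	/*
	 * For loopback destinations, find the real port netdev by address;
	 * otherwise resolve the neighbour's netdev to the underlying port
	 * device.
	 */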
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}

static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * Remember to send a notification to the upper layer.
	 * We are in here, so the upper layer is not aware that this is a
	 * re-connect attempt and is still waiting for the response to its
	 * first connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;
	int ret = 0;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		PDBG("%s Connection problems for atid %u status %u (%s)\n",
		     __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
						   ntohl(rpl->atid_status))));
			if (ret)
				goto fail;
			return 0;
		}
		break;
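	/*
	 * A stale connection may still own the 4-tuple; release this
	 * attempt's resources and retry the connect, up to
	 * ACT_OPEN_RETRY_COUNT times.
	 */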
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.local_addr;
				cxgb4_clip_release(
						ep->com.dev->rdev.lldi.ports[0],
						(const u32 *)
						&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
					atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

fail:
	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
				IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
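	/*
	 * T5 and later let the driver supply the initial send sequence
	 * number; pass a randomized ISS in the extended accept reply.
	 */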
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

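/*
 * Parse the packet headers that follow the CPL_PASS_ACCEPT_REQ message
 * and extract the connection 4-tuple.  The header lengths are encoded
 * differently on T6 than on earlier chips.
 */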
static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}

static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;
	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		PDBG("%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
		   local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4)  {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 tos);
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

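	/*
	 * Clamp the child MTU so that the resulting MSS never exceeds what
	 * the peer advertised (12 bytes covers the TCP timestamp option).
	 */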
	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = tos;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_ep_tid(child_ep);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
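	/*
	 * The TCP connection is now established: arm the MPA-request
	 * timer and send the firmware FLOWC work request for this
	 * connection.
	 */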
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep, skb);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		goto deref_ep;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if the com state is just
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto deref_ep;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

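	/*
	 * Always answer the peer's abort with CPL_ABORT_RPL, using
	 * CPL_ABORT_NO_RST so the hardware does not send its own RST.
	 */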
	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

deref_ep:
	c4iw_put_ep(&ep->com);
	/* Dereferencing ep, referenced in peer_abort_intr() */
	c4iw_put_ep(&ep->com);
	return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
					     ep->com.qp,
					     C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	c4iw_put_ep(&ep->com);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);


	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		mutex_lock(&ep->com.mutex);
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
out:
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
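	/*
	 * With mpa_rev 0 there is no MPA reject message to send, so abort
	 * the connection; otherwise send the reject and do an orderly
	 * close.
	 */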
	if (mpa_rev == 0)
		disconnect = 2;
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect) {
		stop_ep_timer(ep);
		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		err = -ECONNRESET;
		goto err_out;
	}

	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		err = -EINVAL;
		goto err_abort;
	}

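	/*
	 * Negotiate IRD/ORD against the values the peer sent in its MPA v2
	 * request: with RELAXED_IRD_NEGOTIATION the local values are
	 * adjusted to fit, otherwise a mismatch fails the accept.
	 */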
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				ep->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
			     C4IW_QP_ATTR_MPA_ATTR |
			     C4IW_QP_ATTR_MAX_IRD |
			     C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err_deref_cm_id;

	set_bit(STOP_MPA_TIMER, &ep->com.flags);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}

static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}

static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}

static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}

int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail1;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	laddr = (struct sockaddr_in *)&ep->com.local_addr;
	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;

	if (cm_id->m_remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, cm_id->tos);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail3;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
		__func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
		ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail3:
	dst_release(ep->dst);
fail2:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.local_addr;

	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (err)
			return err;
	}
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	}
	return err;
}

static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.local_addr;

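	/*
	 * With enable_fw_ofld_conn the listener is installed as a server
	 * filter; -EBUSY from the LLD is transient, so keep retrying
	 * unless the adapter has hit a fatal error.
	 */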
	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
					err = -EIO;
					break;
				}
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
3416 3417 3418 3419 3420 3421 3422 3423
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}

int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->m_local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->m_local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

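/*
 * Tear down a listening endpoint: remove the server filter or server TID,
 * release any IPv6 CLIP entry, and drop the stid and cm_id references
 * taken in c4iw_create_listen().
 */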
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		struct sockaddr_in6 *sin6;
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
	return err;
}

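/*
 * Initiate a close of "ep".  An abrupt close sends an ABORT to the peer;
 * a graceful close sends a half-close and starts the ep timer to bound
 * the wait for the peer's reply.
 */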
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
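		/*
		 * The close/abort could not be sent: complete the close
		 * upcall ourselves (for a graceful close), move the QP to
		 * ERROR, and flag the error as fatal so the ep resources
		 * are released below.
		 */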
		if (ret) {
			set_bit(EP_DISC_FAIL, &ep->com.history);
			if (!abrupt) {
				stop_ep_timer(ep);
				close_complete_upcall(ep, -EIO);
			}
			if (ep->com.qp) {
				struct c4iw_qp_attributes attrs;

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
						     ep->com.qp,
						     C4IW_QP_ATTR_NEXT_STATE,
						     &attrs, 1);
				if (ret)
					pr_err(MOD
					       "%s - qp <- error failed!\n",
					       __func__);
			}
			fatal = 1;
		}
	}
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
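		/* fall through */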
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
		       __func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
	return;
}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
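		/*
		 * Demux on the TCP state recorded by the firmware:
		 * SYN_SENT is an active open we initiated, SYN_RECV a
		 * passive open that arrived through a server filter.
		 */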
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}

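/*
 * Rebuild a received SYN (delivered as CPL_RX_PKT via a server filter)
 * into a synthesized cpl_pass_accept_req, in place in the skb, so that
 * it can later flow through the normal passive-open path.
 */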
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	__be32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info  = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}

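/*
 * Ask the firmware to complete the passive-open handshake with a
 * FW_OFLD_CONNECTION_WR.  The original skb is stashed in the cookie and
 * replayed through pass_accept_req() once the firmware reply arrives
 * (see passive_ofld_conn_reply()).
 */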
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	if (!req_skb)
		return;
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2, which the firmware uses to send us
	 * the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in the TCB to 0xF so that, when the
	 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
	 * correct value.  If this were 0, TP would ignore any MSS index
	 * greater than 0.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}

/*
 * Handler for CPL_RX_PKT messages.  These arrive when a filter, rather
 * than a server TID, is used to redirect a SYN packet.  Packets that hit
 * the filter are redirected to the offload queue, and the driver then
 * tries to establish the connection using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

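	/*
	 * For loopback traffic, resolve the L2T entry against the port's
	 * own net_device; otherwise use the real device underneath any
	 * VLAN upper device.
	 */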
	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				    pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
					pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate the filter portion for the LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:

		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
			__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

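/*
 * Drain the global timeout list.  The timeout lock is dropped around each
 * process_timeout() call because that path takes the ep mutex and can
 * sleep; each entry is unlinked and its pointers cleared first so that
 * ep_timeout() can tell the ep is no longer queued.
 */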
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

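		/*
		 * A handler returning nonzero has taken ownership of the
		 * skb; only free it here on a zero return.
		 */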
		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
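		/*
		 * data[0] carries the firmware return code and data[1] the
		 * c4iw_wr_wait cookie posted with the original work
		 * request; wake the waiter with the (negated) error value.
		 */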
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	/* This EP will be dereferenced in peer_abort() */
4207 4208 4209 4210 4211 4212
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		goto out;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1,
	 * don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}