/* drivers/infiniband/ulp/ipoib/ipoib_ib.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 4 5
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
L
Linus Torvalds 已提交
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
/* Data-path trace verbosity; runtime-writable through the module sysfs. */
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

/* Serializes start/stop of the delayed P_Key polling machinery. */
static DEFINE_MUTEX(pkey_mutex);
L
Linus Torvalds 已提交
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84

/*
 * Allocate a refcounted ipoib_ah wrapper and create the underlying IB
 * address handle.  Returns NULL on kmalloc or ib_create_ah() failure.
 * The wrapper is released via kref (see ipoib_free_ah).
 */
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;	/* no send has referenced this AH yet */
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

/*
 * kref release callback for an address handle: rather than destroying
 * it immediately (sends may still be in flight), queue it on the
 * device's dead_ahs list for the AH reaper to destroy later.
 */
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
/*
 * Undo the DMA mapping(s) of one UD receive buffer.  In 4K-MTU
 * scatter/gather mode (ipoib_ud_need_sg) the skb head and the single
 * page fragment were mapped separately; otherwise there is one linear
 * mapping covering the whole buffer.
 */
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

/*
 * Account @length received bytes in @skb.  For the scatter/gather
 * (4K MTU) layout the head buffer holds IPOIB_UD_HEAD_SIZE bytes and
 * the remainder lives in the first page fragment; otherwise a plain
 * skb_put() suffices.
 */
static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * There is only two buffers needed for max_payload = 4K,
		 * first buf size is IPOIB_UD_HEAD_SIZE
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		frag->size     = size;
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);

}

128
static int ipoib_ib_post_receive(struct net_device *dev, int id)
L
Linus Torvalds 已提交
129
{
130
	struct ipoib_dev_priv *priv = netdev_priv(dev);
L
Linus Torvalds 已提交
131
	struct ib_recv_wr *bad_wr;
132 133
	int ret;

134 135 136
	priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
137 138


139
	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
140 141
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
142
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
143 144 145
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}
L
Linus Torvalds 已提交
146

147
	return ret;
L
Linus Torvalds 已提交
148 149
}

150
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
L
Linus Torvalds 已提交
151 152 153
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
154 155
	int buf_size;
	u64 *mapping;
L
Linus Torvalds 已提交
156

157 158 159 160 161 162 163 164
	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;
165 166 167 168 169 170 171 172

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
L
Linus Torvalds 已提交
189 190
	}

191 192
	priv->rx_ring[id].skb = skb;
	return skb;
193

194 195 196 197 198
partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
L
Linus Torvalds 已提交
199 200 201 202 203 204 205
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

206
	for (i = 0; i < ipoib_recvq_size; ++i) {
207
		if (!ipoib_alloc_rx_skb(dev, i)) {
208 209 210
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
L
Linus Torvalds 已提交
211 212 213 214 215 216 217 218 219
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

220
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
L
Linus Torvalds 已提交
221 222
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
223 224
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
225
	u64 mapping[IPOIB_UD_RX_SG];
L
Linus Torvalds 已提交
226

227 228
	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);
L
Linus Torvalds 已提交
229

230 231 232 233 234 235 236 237 238 239 240 241 242
	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
243
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
244 245 246 247
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}
L
Linus Torvalds 已提交
248

249 250 251 252 253 254 255
	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

256 257 258
	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

259 260 261 262
	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
263
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
264
		++dev->stats.rx_dropped;
265 266 267 268 269 270
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

271 272
	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
273 274 275

	skb_pull(skb, IB_GRH_BYTES);

276 277 278 279 280
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
281 282
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;
283 284 285 286

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
287 288 289 290

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

V
Vladimir Sokolovsky 已提交
291 292 293 294
	if (dev->features & NETIF_F_LRO)
		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
	else
		netif_receive_skb(skb);
L
Linus Torvalds 已提交
295

296 297 298 299 300
repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}
L
Linus Torvalds 已提交
301

E
Eli Cohen 已提交
302 303 304 305 306 307
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
E
Eli Cohen 已提交
308
	int off;
E
Eli Cohen 已提交
309

E
Eli Cohen 已提交
310 311 312 313 314 315 316 317 318
	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;
E
Eli Cohen 已提交
319 320 321

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
E
Eli Cohen 已提交
322
		mapping[i + off] = ib_dma_map_page(ca, frag->page,
E
Eli Cohen 已提交
323 324
						 frag->page_offset, frag->size,
						 DMA_TO_DEVICE);
E
Eli Cohen 已提交
325
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
E
Eli Cohen 已提交
326 327 328 329 330 331 332
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
E
Eli Cohen 已提交
333
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
E
Eli Cohen 已提交
334
	}
E
Eli Cohen 已提交
335 336 337 338

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

E
Eli Cohen 已提交
339 340 341 342 343 344 345 346 347
	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
E
Eli Cohen 已提交
348
	int off;
E
Eli Cohen 已提交
349

E
Eli Cohen 已提交
350 351 352 353 354
	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;
E
Eli Cohen 已提交
355 356 357

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
E
Eli Cohen 已提交
358
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
E
Eli Cohen 已提交
359 360 361 362
				  DMA_TO_DEVICE);
	}
}

363 364 365 366 367
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
L
Linus Torvalds 已提交
368

369 370
	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);
L
Linus Torvalds 已提交
371

372 373 374 375
	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
L
Linus Torvalds 已提交
376
	}
377 378 379

	tx_req = &priv->tx_ring[wr_id];

E
Eli Cohen 已提交
380
	ipoib_dma_unmap_tx(priv->ca, tx_req);
381

382 383
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;
384 385 386 387

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
388 389 390
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
391 392 393 394 395 396 397 398 399
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

400 401 402 403 404 405 406 407 408 409 410
/*
 * Drain up to MAX_SEND_CQE completions from the send CQ and process
 * them.  Returns non-zero when a full batch was taken, i.e. the
 * caller should poll again because more completions may be pending.
 */
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

411
int ipoib_poll(struct napi_struct *napi, int budget)
412
{
413 414
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
R
Roland Dreier 已提交
415 416 417 418 419 420
	int done;
	int t;
	int n, i;

	done  = 0;

421 422 423 424
poll_more:
	while (done < budget) {
		int max = (budget - done);

R
Roland Dreier 已提交
425
		t = min(IPOIB_NUM_WC, max);
426
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
R
Roland Dreier 已提交
427

428
		for (i = 0; i < n; i++) {
R
Roland Dreier 已提交
429 430
			struct ib_wc *wc = priv->ibwc + i;

431
			if (wc->wr_id & IPOIB_OP_RECV) {
R
Roland Dreier 已提交
432
				++done;
433 434 435 436
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
437 438
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
R
Roland Dreier 已提交
439 440
		}

441
		if (n != t)
R
Roland Dreier 已提交
442 443 444
			break;
	}

445
	if (done < budget) {
V
Vladimir Sokolovsky 已提交
446 447 448
		if (dev->features & NETIF_F_LRO)
			lro_flush_all(&priv->lro.lro_mgr);

449
		netif_rx_complete(dev, napi);
450
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
R
Roland Dreier 已提交
451 452
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
453 454
		    netif_rx_reschedule(dev, napi))
			goto poll_more;
R
Roland Dreier 已提交
455 456
	}

457
	return done;
L
Linus Torvalds 已提交
458 459 460 461
}

/* Receive CQ event handler: defer all work to the NAPI poll loop. */
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_rx_schedule(dev, &priv->napi);
}

468 469 470 471
/*
 * Drain the send CQ under the TX lock.  If the queue is still
 * stopped afterwards (ring still full), rearm the poll timer to try
 * again on the next jiffy.
 */
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
484 485 486
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
487 488
}

L
Linus Torvalds 已提交
489 490 491
/*
 * Build the gather list for one send WR (linear head plus fragments)
 * in the shared tx_wr/tx_sge templates and post it.  When @head is
 * non-NULL the WR is posted as LSO with the given header/mss.
 */
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr         = mapping[0];
		priv->tx_sge[0].length       = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id 	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah 	     = address;

	if (head) {
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

/*
 * Transmit one datagram skb on the UD QP.  GSO skbs are sent as LSO
 * (header pulled out separately); non-GSO skbs larger than the
 * multicast MTU are dropped.  The net queue is stopped when the ring
 * fills, and the send CQ is polled inline once too many sends are
 * outstanding.
 */
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
618 619 620 621
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);
L
Linus Torvalds 已提交
622 623

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
624
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
L
Linus Torvalds 已提交
625
			list_del(&ah->list);
626 627
			ib_destroy_ah(ah->ah);
			kfree(ah);
L
Linus Torvalds 已提交
628
		}
629 630 631

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
L
Linus Torvalds 已提交
632 633
}

D
David Howells 已提交
634
void ipoib_reap_ah(struct work_struct *work)
L
Linus Torvalds 已提交
635
{
D
David Howells 已提交
636 637 638
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;
L
Linus Torvalds 已提交
639 640 641 642

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
643 644
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
L
Linus Torvalds 已提交
645 646
}

647 648 649 650 651
/* Timer callback: @ctx is the net_device; drain the send CQ. */
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

L
Linus Torvalds 已提交
652 653 654 655 656
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

657 658 659 660 661 662 663
	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

664
	ret = ipoib_init_qp(dev);
L
Linus Torvalds 已提交
665
	if (ret) {
666
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
L
Linus Torvalds 已提交
667 668 669 670 671 672
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
673
		ipoib_ib_dev_stop(dev, 1);
L
Linus Torvalds 已提交
674 675 676
		return -1;
	}

677 678
	ret = ipoib_cm_dev_open(dev);
	if (ret) {
679
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
680
		ipoib_ib_dev_stop(dev, 1);
681 682 683
		return -1;
	}

L
Linus Torvalds 已提交
684
	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
685 686
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));
L
Linus Torvalds 已提交
687

688 689 690 691
	init_timer(&priv->poll_timer);
	priv->poll_timer.function = ipoib_ib_tx_timer_func;
	priv->poll_timer.data = (unsigned long)dev;

L
Leonid Arsh 已提交
692 693
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

L
Linus Torvalds 已提交
694 695 696
	return 0;
}

L
Leonid Arsh 已提交
697 698 699 700 701
/*
 * Refresh IPOIB_PKEY_ASSIGNED according to whether the interface's
 * P_Key is currently present in the port's P_Key table.
 */
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

L
Linus Torvalds 已提交
708 709 710 711
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

L
Leonid Arsh 已提交
712 713 714 715 716 717 718
	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

L
Linus Torvalds 已提交
719 720 721 722 723
	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

724
int ipoib_ib_dev_down(struct net_device *dev, int flush)
L
Linus Torvalds 已提交
725 726 727 728 729 730 731 732 733 734
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
735
		mutex_lock(&pkey_mutex);
L
Linus Torvalds 已提交
736
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
737
		cancel_delayed_work(&priv->pkey_poll_task);
738
		mutex_unlock(&pkey_mutex);
739 740
		if (flush)
			flush_workqueue(ipoib_workqueue);
L
Linus Torvalds 已提交
741 742
	}

743
	ipoib_mcast_stop_thread(dev, flush);
L
Linus Torvalds 已提交
744 745 746 747 748 749 750 751 752 753 754 755 756
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

757
	for (i = 0; i < ipoib_recvq_size; ++i)
L
Linus Torvalds 已提交
758 759 760 761 762 763
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

764 765 766 767
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;
768 769 770 771 772 773 774 775

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

776
	do {
777
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
778
		for (i = 0; i < n; ++i) {
779 780 781 782 783 784 785 786
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

787 788 789 790 791
			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
792 793
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
794 795
		}
	} while (n == IPOIB_NUM_WC);
796 797 798

	while (poll_tx(priv))
		; /* nothing */
799 800

	local_bh_enable();
801 802
}

803
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
L
Linus Torvalds 已提交
804 805 806 807
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
808
	struct ipoib_tx_buf *tx_req;
809
	int i;
L
Linus Torvalds 已提交
810

L
Leonid Arsh 已提交
811 812
	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

813 814
	ipoib_cm_dev_stop(dev);

815 816 817 818
	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
L
Linus Torvalds 已提交
819
	qp_attr.qp_state = IB_QPS_ERR;
820
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
L
Linus Torvalds 已提交
821 822 823 824 825 826 827 828 829 830 831 832 833 834
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
835
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
L
Linus Torvalds 已提交
836
				tx_req = &priv->tx_ring[priv->tx_tail &
837
							(ipoib_sendq_size - 1)];
E
Eli Cohen 已提交
838
				ipoib_dma_unmap_tx(priv->ca, tx_req);
L
Linus Torvalds 已提交
839 840
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
841
				--priv->tx_outstanding;
L
Linus Torvalds 已提交
842 843
			}

844 845 846 847 848 849
			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
850 851
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
852 853 854
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}
L
Linus Torvalds 已提交
855 856 857 858

			goto timeout;
		}

859
		ipoib_drain_cq(dev);
R
Roland Dreier 已提交
860

L
Linus Torvalds 已提交
861 862 863 864 865 866
		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
867
	del_timer_sync(&priv->poll_timer);
L
Linus Torvalds 已提交
868
	qp_attr.qp_state = IB_QPS_RESET;
869
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
L
Linus Torvalds 已提交
870 871 872 873 874
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
875 876
	if (flush)
		flush_workqueue(ipoib_workqueue);
L
Linus Torvalds 已提交
877 878 879 880 881 882 883 884 885 886 887 888 889 890

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

891
	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
R
Roland Dreier 已提交
892

L
Linus Torvalds 已提交
893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918
	return 0;
}

/*
 * One-time IB initialization for the device: record the HCA and port,
 * create the transport resources (QP/CQs), and — if the netdev is
 * already administratively up — open the IB side immediately.
 * Returns 0 on success or -ENODEV on failure.
 */
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

919 920
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				enum ipoib_flush_level level)
L
Linus Torvalds 已提交
921
{
922
	struct ipoib_dev_priv *cpriv;
D
David Howells 已提交
923
	struct net_device *dev = priv->dev;
924 925 926
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);
L
Linus Torvalds 已提交
927

928 929 930 931 932
	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
933
		__ipoib_ib_dev_flush(cpriv, level);
934 935 936 937

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
L
Leonid Arsh 已提交
938 939 940 941 942 943
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
L
Linus Torvalds 已提交
944
		return;
L
Leonid Arsh 已提交
945
	}
L
Linus Torvalds 已提交
946

947
	if (level == IPOIB_FLUSH_HEAVY) {
948 949 950
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
951
			ipoib_ib_dev_stop(dev, 0);
952 953
			if (ipoib_pkey_dev_delay_open(dev))
				return;
954 955 956
		}

		/* restart QP only if P_Key index is changed */
957 958
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
959 960 961 962 963 964
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

965 966 967 968
	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}
L
Linus Torvalds 已提交
969

970 971
	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);
L
Linus Torvalds 已提交
972

973
	if (level == IPOIB_FLUSH_HEAVY) {
974 975 976 977
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

L
Linus Torvalds 已提交
978 979 980 981
	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
982
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
983 984
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
D
David Howells 已提交
985
		ipoib_mcast_restart_task(&priv->restart_task);
986
	}
987
}
L
Linus Torvalds 已提交
988

989 990 991 992 993 994 995 996 997
/* Work item wrapper: run a LIGHT flush for the owning device. */
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
998 999
{
	struct ipoib_dev_priv *priv =
1000
		container_of(work, struct ipoib_dev_priv, flush_normal);
1001

1002
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
1003
}
1004

1005
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
1006 1007
{
	struct ipoib_dev_priv *priv =
1008
		container_of(work, struct ipoib_dev_priv, flush_heavy);
1009

1010
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
L
Linus Torvalds 已提交
1011 1012 1013 1014 1015 1016 1017 1018
}

/*
 * Final IB teardown for the device: stop the multicast thread, flush
 * multicast groups, and destroy the transport resources.
 */
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assigment Interim Support
 *
 * The following is initial implementation of delayed P_Key assigment
 * mechanism. It is using the same approach implemented for the multicast
 * group join. The single goal of this implementation is to quickly address
 * Bug #2507. This implementation will probably be removed when the P_Key
 * change async notification is available.
 */

D
David Howells 已提交
1035
void ipoib_pkey_poll(struct work_struct *work)
L
Linus Torvalds 已提交
1036
{
D
David Howells 已提交
1037
	struct ipoib_dev_priv *priv =
1038
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
D
David Howells 已提交
1039
	struct net_device *dev = priv->dev;
L
Linus Torvalds 已提交
1040 1041 1042 1043 1044 1045

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
1046
		mutex_lock(&pkey_mutex);
L
Linus Torvalds 已提交
1047 1048
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
1049
					   &priv->pkey_poll_task,
L
Linus Torvalds 已提交
1050
					   HZ);
1051
		mutex_unlock(&pkey_mutex);
L
Linus Torvalds 已提交
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assigment flag                            */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
1065
		mutex_lock(&pkey_mutex);
L
Linus Torvalds 已提交
1066 1067
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
1068
				   &priv->pkey_poll_task,
L
Linus Torvalds 已提交
1069
				   HZ);
1070
		mutex_unlock(&pkey_mutex);
L
Linus Torvalds 已提交
1071 1072 1073 1074 1075
		return 1;
	}

	return 0;
}