ipoib_ib.c 27.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 4 5
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
L
Linus Torvalds 已提交
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
38
#include <linux/slab.h>
L
Linus Torvalds 已提交
39

E
Eli Cohen 已提交
40 41
#include <linux/ip.h>
#include <linux/tcp.h>
L
Linus Torvalds 已提交
42 43 44 45 46 47 48 49 50 51 52

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

53
static DEFINE_MUTEX(pkey_mutex);
L
Linus Torvalds 已提交
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84

/*
 * Allocate an ipoib_ah wrapper and the underlying IB address handle.
 * Returns NULL on allocation or ib_create_ah() failure.
 */
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah = kmalloc(sizeof *ah, GFP_KERNEL);

	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		return NULL;
	}

	ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	return ah;
}

/*
 * kref release callback: the AH cannot be destroyed immediately because
 * sends referencing it may still be outstanding, so queue it on the
 * dead_ahs list for the reaper task to destroy later.
 */
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *dead_ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(dead_ah->dev);
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->lock, irq_flags);
	list_add_tail(&dead_ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, irq_flags);
}

90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
/*
 * Undo the DMA mapping of one UD receive buffer: a single buffer in the
 * small-MTU case, or a head buffer plus one page in the scatter/gather case.
 */
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (!ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
		return;
	}

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
			    DMA_FROM_DEVICE);
	ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
			  DMA_FROM_DEVICE);
}

/*
 * Account 'length' received bytes into the skb.  In the scatter/gather
 * case the first IPOIB_UD_HEAD_SIZE bytes live in the linear area and
 * the remainder in page fragment 0; otherwise a plain skb_put() suffices.
 */
static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	skb_frag_t *frag;
	unsigned int frag_len;

	if (!ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_put(skb, length);
		return;
	}

	/*
	 * Only two buffers are needed for max_payload = 4K:
	 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
	 */
	frag     = &skb_shinfo(skb)->frags[0];
	frag_len = length - IPOIB_UD_HEAD_SIZE;

	skb->tail += IPOIB_UD_HEAD_SIZE;
	skb->len  += length;

	frag->size     = frag_len;
	skb->data_len += frag_len;
	skb->truesize += frag_len;
}

128
static int ipoib_ib_post_receive(struct net_device *dev, int id)
L
Linus Torvalds 已提交
129
{
130
	struct ipoib_dev_priv *priv = netdev_priv(dev);
L
Linus Torvalds 已提交
131
	struct ib_recv_wr *bad_wr;
132 133
	int ret;

134 135 136
	priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
137 138


139
	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
140 141
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
142
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
143 144 145
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}
L
Linus Torvalds 已提交
146

147
	return ret;
L
Linus Torvalds 已提交
148 149
}

150
/*
 * Allocate and DMA-map a receive skb for ring slot 'id'.
 *
 * Small-MTU path maps one linear buffer; the S/G path maps an
 * IPOIB_UD_HEAD_SIZE linear head plus one page fragment.  On success the
 * skb is stored in priv->rx_ring[id].skb and also returned; on failure
 * everything mapped so far is unwound and NULL is returned.
 */
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		/* GFP_ATOMIC: may run from the completion path */
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	/* mapping[0] succeeded; the page (if any) is freed with the skb */
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

206
	for (i = 0; i < ipoib_recvq_size; ++i) {
207
		if (!ipoib_alloc_rx_skb(dev, i)) {
208 209 210
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
L
Linus Torvalds 已提交
211 212 213 214 215 216 217 218 219
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

220
/*
 * Handle one receive completion: validate it, replenish the ring slot,
 * then hand the received skb up the network stack.  The slot is always
 * reposted (with either a fresh or the recycled buffer) via 'repost'.
 */
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	/* ring index is the wr_id with the RECV flag bit stripped */
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/* flush errors are expected during shutdown; stay quiet */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	/* save the mapping before ipoib_alloc_rx_skb() overwrites it */
	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* strip the GRH, then the 4-byte IPoIB encapsulation header */
	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}
L
Linus Torvalds 已提交
305

E
Eli Cohen 已提交
306 307 308 309 310 311
/*
 * DMA-map a send skb: mapping[0] covers the linear head (if any) and
 * subsequent entries cover the page fragments.  'off' is 1 when the head
 * occupies mapping[0], shifting the fragment mappings by one.
 * Returns 0 on success, -EIO on any mapping failure (fully unwound).
 */
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca, frag->page,
						 frag->page_offset, frag->size,
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	/*
	 * Unmap fragments 0..i-1; mapping index is (i - !off) because the
	 * frag at index i-1 lives at mapping[i - 1 + off].
	 */
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
E
Eli Cohen 已提交
352
	int off;
E
Eli Cohen 已提交
353

E
Eli Cohen 已提交
354 355 356 357 358
	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;
E
Eli Cohen 已提交
359 360 361

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
E
Eli Cohen 已提交
362
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
E
Eli Cohen 已提交
363 364 365 366
				  DMA_TO_DEVICE);
	}
}

367 368 369 370 371
/*
 * Handle one send completion: unmap and free the skb, update stats,
 * advance tx_tail, and wake the netif queue once the ring drains to
 * half full (matching the stop condition in ipoib_send()).
 */
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	/* wake only at the half-full watermark to avoid queue thrash */
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	/* flush errors are expected during shutdown; warn on anything else */
	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

404 405 406 407 408 409 410 411 412 413 414
/*
 * Drain up to MAX_SEND_CQE completions from the send CQ.
 * Returns nonzero when the CQ may still hold more completions.
 */
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int polled, i;

	polled = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < polled; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, &priv->send_wc[i]);

	return polled == MAX_SEND_CQE;
}

415
/*
 * NAPI poll routine: drain the receive CQ up to 'budget' receive
 * completions, dispatching each to the UD or CM handler.  Only receive
 * completions count against the budget; CM send completions found on the
 * same CQ are processed for free.  When under budget, complete NAPI and
 * rearm the CQ; a missed-event report re-enters the loop to close the
 * rearm race.
 */
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done  = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		/* CQ returned fewer than requested: it is empty for now */
		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		/*
		 * Rearm the CQ; if events were missed while unarmed and we
		 * can reschedule NAPI, keep polling instead of returning.
		 */
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
463 464 465
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

466
	napi_schedule(&priv->napi);
L
Linus Torvalds 已提交
467 468
}

469 470 471 472
/*
 * Drain the send CQ under the netif tx lock (runs from the poll timer).
 * If the queue is still stopped afterwards, completions are outstanding,
 * so rearm the timer to poll again on the next jiffy.
 */
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
485 486 487
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
488 489
}

L
Linus Torvalds 已提交
490 491 492
/*
 * Build the gather list and UD work request for one send and post it.
 * tx_sge[0] covers the linear head when present ('off' == 1); fragment
 * SGEs follow.  When 'head' is non-NULL the WR is posted as LSO with the
 * given header, otherwise as a plain send.
 * Returns the ib_post_send() result.
 */
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr         = mapping[0];
		priv->tx_sge[0].length       = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id 	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah 	     = address;

	if (head) {
		/* LSO: hardware segments using gso_size and this header */
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

/*
 * Transmit one skb over the UD QP to (address, qpn).
 *
 * GSO skbs are sent as LSO: the IPoIB+TCP/IP header is pulled off the
 * linear data and passed separately.  Non-GSO skbs larger than the
 * multicast MTU are dropped (and reported to the CM path).  The skb is
 * consumed in every case; errors only bump the tx error counters.
 */
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	/* ring full: arm the send CQ and stop the queue before posting */
	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		/* undo the accounting and mapping done above */
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);

	}

	/* too many outstanding sends: reap completions inline */
	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
620 621 622 623
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);
L
Linus Torvalds 已提交
624 625

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
626
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
L
Linus Torvalds 已提交
627
			list_del(&ah->list);
628 629
			ib_destroy_ah(ah->ah);
			kfree(ah);
L
Linus Torvalds 已提交
630
		}
631 632 633

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
L
Linus Torvalds 已提交
634 635
}

D
David Howells 已提交
636
void ipoib_reap_ah(struct work_struct *work)
L
Linus Torvalds 已提交
637
{
D
David Howells 已提交
638 639 640
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;
L
Linus Torvalds 已提交
641 642 643 644

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
645 646
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
L
Linus Torvalds 已提交
647 648
}

649 650 651 652 653
/* Poll-timer callback: the timer data is the net_device pointer. */
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	struct net_device *dev = (struct net_device *) ctx;

	drain_tx_cq(dev);
}

L
Linus Torvalds 已提交
654 655 656 657 658
/*
 * Bring the IB side of the interface up: resolve the P_Key index,
 * initialize the QP, fill the receive ring, open the CM side, start the
 * AH reaper, and enable NAPI.  Returns 0 on success, -1 on any failure
 * (error paths tear back down via ipoib_ib_dev_stop()).
 */
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	/* INITIALIZED gates NAPI: only enable it on the first transition */
	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}

L
Leonid Arsh 已提交
696 697 698 699 700
/*
 * Probe the port's P_Key table for our P_Key and record the result in
 * the IPOIB_PKEY_ASSIGNED flag.
 */
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index) == 0)
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

L
Linus Torvalds 已提交
707 708 709 710
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

L
Leonid Arsh 已提交
711 712 713 714 715 716 717
	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

L
Linus Torvalds 已提交
718 719 720 721 722
	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

723
/*
 * Take the interface operationally down: clear OPER_UP, drop carrier,
 * stop the P_Key polling task if it is still running, stop the multicast
 * thread, and flush multicast groups and cached paths.  'flush' selects
 * whether the workqueue is flushed synchronously.
 */
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		/* pkey_mutex serializes against ipoib_pkey_poll() requeueing */
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

756
	for (i = 0; i < ipoib_recvq_size; ++i)
L
Linus Torvalds 已提交
757 758 759 760 761 762
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

763 764 765 766
/*
 * Drain both CQs outside the NAPI context (used on the shutdown path).
 * Successful receive completions are rewritten as flush errors so the
 * handlers drop them instead of passing packets up a dying device.
 */
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	/* also reap any outstanding send completions */
	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

802
/*
 * Stop the IB side of the interface: disable NAPI, stop CM, move the QP
 * to the error state, and wait up to five seconds for all posted sends
 * and receives to complete.  If they do not, assume the HW is wedged and
 * free the pending work requests by hand.  Finally reset the QP, stop
 * the AH reaper, reap remaining dead AHs (up to one second), and rearm
 * the receive CQ.  Always returns 0.
 */
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		/* process completions ourselves since NAPI is disabled */
		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

909 910 911
	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

L
Linus Torvalds 已提交
912 913 914 915 916 917 918 919 920 921
	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

922 923
/*
 * Core flush state machine, recursing into child (VLAN) interfaces.
 *
 * LIGHT:  invalidate paths and flush multicast groups.
 * NORMAL: additionally take the IB side down and back up.
 * HEAVY:  additionally restart the QP, but only if the P_Key index
 *         actually changed (or the P_Key vanished from the port table,
 *         in which case delayed-open polling takes over).
 */
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			/* P_Key gone from the table: go down and poll for it */
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}
L
Linus Torvalds 已提交
991

992 993 994 995 996 997 998 999 1000
/* Work handler: run a LIGHT flush for the owning device. */
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	__ipoib_ib_dev_flush(container_of(work, struct ipoib_dev_priv,
					  flush_light),
			     IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
1001 1002
{
	struct ipoib_dev_priv *priv =
1003
		container_of(work, struct ipoib_dev_priv, flush_normal);
1004

1005
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
1006
}
1007

1008
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
1009 1010
{
	struct ipoib_dev_priv *priv =
1011
		container_of(work, struct ipoib_dev_priv, flush_heavy);
1012

1013
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
L
Linus Torvalds 已提交
1014 1015 1016 1017 1018 1019 1020 1021
}

/*
 * Tear down the IB side of the device: stop and flush the multicast
 * machinery, then release the transport resources.
 */
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assigment Interim Support
 *
 * The following is initial implementation of delayed P_Key assigment
 * mechanism. It is using the same approach implemented for the multicast
 * group join. The single goal of this implementation is to quickly address
 * Bug #2507. This implementation will probably be removed when the P_Key
 * change async notification is available.
 */

D
David Howells 已提交
1038
/*
 * Delayed work that polls for our P_Key to appear in the port table.
 * Once present, open the interface; otherwise requeue ourselves every
 * HZ unless IPOIB_PKEY_STOP was set (checked under pkey_mutex so the
 * stop path in ipoib_ib_dev_down() cannot race the requeue).
 */
void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assigment flag                            */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
1068
		mutex_lock(&pkey_mutex);
L
Linus Torvalds 已提交
1069 1070
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
1071
				   &priv->pkey_poll_task,
L
Linus Torvalds 已提交
1072
				   HZ);
1073
		mutex_unlock(&pkey_mutex);
L
Linus Torvalds 已提交
1074 1075 1076 1077 1078
		return 1;
	}

	return 0;
}