// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

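/* Validate a requested CQ size: it must be positive, no larger than the
 * device's max_cqe, and (when checking an existing CQ for resize) not
 * smaller than the number of completions already queued.
 */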
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

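/* Tasklet handler: invoke the consumer's completion callback unless the
 * CQ has been marked as dying under cq_lock.
 */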
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

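/* Initialize a new CQ: allocate the completion ring, share its mmap info
 * with userspace when uresp is provided, and set up the completion
 * tasklet and lock.
 */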
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

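/* Resize the completion ring under cq_lock and update ibcq.cqe on
 * success.
 */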
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

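/* Add one CQE to the ring. If the queue is full, report IB_EVENT_CQ_ERR
 * to the consumer and return -EBUSY; otherwise schedule the completion
 * tasklet when the CQ is armed for this completion type.
 */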
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

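/* Mark the CQ as dying so rxe_send_complete() no longer calls into the
 * consumer's completion handler.
 */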
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

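/* Pool cleanup callback: release the completion ring when the CQ object
 * is destroyed.
 */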
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}