/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
static struct uld_ctx *ctx_rr;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

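/*
 * CPL message handlers, indexed by CPL opcode;
 * chcr_uld_rx_handler() dispatches incoming messages through this table.
 */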
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};

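/*
 * Return the device context to use for the next new session, or NULL
 * when no crypto device has been added yet.
 */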
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select the
	 * device for crypto operations in a round-robin fashion.
	 * Note that a session must keep using the same device to
	 * preserve request-response ordering.
	 */
	mutex_lock(&dev_mutex);
	if (!list_empty(&uld_ctx_list)) {
		u_ctx = ctx_rr;
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	mutex_unlock(&dev_mutex);
	return u_ctx;
}

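/*
 * Allocate a chcr_dev for this adapter context and link the context
 * into the global device list, seeding the round-robin cursor with the
 * first device. Called when the adapter reaches CXGB4_STATE_UP.
 */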
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;
	dev->u_ctx = u_ctx;
	atomic_inc(&dev_count);
	mutex_lock(&dev_mutex);
	list_add_tail(&u_ctx->entry, &uld_ctx_list);
	if (!ctx_rr)
		ctx_rr = u_ctx;
	mutex_unlock(&dev_mutex);
	return 0;
}

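/*
 * Advance the round-robin cursor past @u_ctx, unlink the context from
 * the device list and free its chcr_dev. Must be called with dev_mutex
 * held.
 */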
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
	if (ctx_rr == u_ctx) {
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	list_del(&u_ctx->entry);
	if (list_empty(&uld_ctx_list))
		ctx_rr = NULL;
	kfree(u_ctx->dev);
	u_ctx->dev = NULL;
	atomic_dec(&dev_count);
	return 0;
}

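/*
 * Process a CPL_FW6_PLD completion from firmware: recover the
 * crypto_async_request pointer stashed in data[1], map MAC/pad error
 * bits to -EBADMSG and run the request's completion callback.
 */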
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EBADMSG;
		atomic_inc(&adap->chcr_stats.error);
	}
	/* call the completion callback with the error status (0 on success) */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	return 0;
}

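/* Hand a crypto work-request skb to cxgb4 for transmission. */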
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

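/*
 * ULD add callback: allocate the per-adapter context and, when inline
 * IPsec is configured, register the xfrm offload operations.
 */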
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Bail out if the adapter does not support crypto lookaside */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device context and cache the LLD info */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
	return u_ctx;
}

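/*
 * ULD rx callback: a response arrives either inline in @rsp or in the
 * packet gather list @pgl; dispatch CPL_FW6_PLD messages to their
 * handler, and log and drop anything else.
 */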
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
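/* ULD tx hook: route inline-IPsec skbs to the chcr IPsec transmit path. */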
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

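/*
 * ULD state-change callback: add the device and start the crypto
 * service when the adapter comes up; remove the device on detach and
 * stop the service once the last device is gone.
 */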
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

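/*
 * Register with cxgb4 as a crypto ULD. A registration failure is
 * logged but does not fail module load.
 */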
static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");

	return 0;
}

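/*
 * Stop the crypto service if it is still running, free all device
 * contexts and unregister the ULD.
 */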
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);