// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot, chtls_cpl_protv6;
struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};

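/* Backlog receive callback installed on offloaded listening sockets: skbs
 * whose transport header differs from the network header are regular TCP
 * packets and take the tcp_v4_do_rcv() path, while driver-queued CPL skbs
 * are dispatched to the handler stored in their control block.
 */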
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

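/* Begin offloading a listening socket: reject non-TCP and IPv4 loopback
 * listeners, install the chtls backlog handler and post CHTLS_LISTEN_START
 * on the listen notifier chain.
 */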
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return 0;
}

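/* Stop offloading a listening socket by posting CHTLS_LISTEN_STOP on the
 * listen notifier chain.
 */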
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}

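/* TOE ->feature callback: return 1 if any port on this adapter advertises
 * NETIF_F_HW_TLS_RECORD, 0 otherwise.
 */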
static int chtls_inline_feature(struct tls_toe_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

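/* TOE ->hash callback: start listen offload for sockets in TCP_LISTEN. */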
static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}

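/* TOE ->unhash callback: stop listen offload for sockets in TCP_LISTEN. */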
static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}

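/* Free all per-adapter state: the key map, the hardware TID idr, the cached
 * response skbs, the copied LLD info and the preallocated askb.
 */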
static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_toe_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

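/* kref release callback: clear the adapter's TLS PDU counters and free the
 * chtls device.
 */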
static inline void chtls_dev_release(struct kref *kref)
{
	struct tls_toe_device *dev;
	struct chtls_dev *cdev;
	struct adapter *adap;

	dev = container_of(kref, struct tls_toe_device, kref);
	cdev = to_chtls_dev(dev);

	/* Reset tls rx/tx stats */
	adap = pci_get_drvdata(cdev->pdev);
	atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
	atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);

	chtls_free_uld(cdev);
}

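/* Build the TOE device name from the first port, wire up the TOE callbacks
 * and register the device with the TLS TOE core.
 */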
static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_toe_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_TOE_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_toe_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

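/* Work item that drains cdev->deferq, invoking each skb's deferred handler
 * with the queue lock dropped.
 */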
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
				struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

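/* Preallocate a zeroed skb holding a TCP header (cdev->askb) for later
 * control-message use.
 */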
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

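/* ULD ->add callback: allocate a chtls_dev, copy the LLD info, preallocate
 * the per-bin response skb cache, initialize the deferred-work queue, idr
 * and locks, set up the key map when key memory is available, and add the
 * device to the global list.
 */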
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->tids = lldi->tids;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

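/* Drop the reference of every registered device that reached
 * CHTLS_CDEV_STATE_UP, releasing it via chtls_dev_release().
 */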
static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}

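/* ULD state-change callback: register the TLS TOE device when the adapter
 * comes up and drop it on detach.
 */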
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req which will be synthesized by
	 * the driver. Once the driver synthesizes cpl_pass_accept_req, the skb
	 * will go through the regular cpl_pass_accept_req processing in TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		   - pktshift);
	/* For now we will copy cpl_rx_pkt into the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req)
				       , gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

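/* Handle an ingress CPL_RX_PKT: copy the gather list into an skb sized for
 * the synthesized cpl_pass_accept_req and pass it to the opcode handler.
 */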
static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}

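/* Handle a CPL response that arrived without a gather list: reuse the
 * per-bin cached skb when it is exclusively owned and has room, otherwise
 * allocate a fresh one, then dispatch by opcode.
 */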
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

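/* Push the RSS header back in front of the payload and dispatch the skb to
 * the handler for its CPL opcode.
 */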
static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

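/* Top-level ULD receive hook: CPL_RX_PKT is converted to a synthesized
 * pass-accept request, responses without a gather list go through
 * chtls_recv_rsp(), and everything else is turned into an skb and handed
 * to chtls_recv().
 */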
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}

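/* Minimal getsockopt: report a TLS 1.2 tls_crypto_info back to user space. */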
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}

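/* Validate the tls_crypto_info provided by user space, copy in the
 * AES-GCM-128/256 key material and program the key into hardware via
 * chtls_setkey().
 */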
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int cipher_type;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * copy keys from user based on GCM cipher type.
	 */
	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_sockptr_offset((char *)crypto_info +
				sizeof(*crypto_info),
				optval, sizeof(*crypto_info),
				sizeof(struct tls12_crypto_info_aes_gcm_128)
				- sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		crypto_info[0] = tmp_crypto_info;
		rc = copy_from_sockptr_offset((char *)crypto_info +
				sizeof(*crypto_info),
				optval, sizeof(*crypto_info),
				sizeof(struct tls12_crypto_info_aes_gcm_256)
				- sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_256;
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
	rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

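/* Switch a socket to the chtls protocol operations (IPv4 or IPv6). */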
void chtls_install_cpl_ops(struct sock *sk)
{
	if (sk->sk_family == AF_INET)
		sk->sk_prot = &chtls_cpl_prot;
	else
		sk->sk_prot = &chtls_cpl_protv6;
}

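/* Clone tcp_prot/tcpv6_prot and override the operations that chtls
 * intercepts for offloaded connections.
 */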
static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot			= tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close		= chtls_close;
	chtls_cpl_prot.disconnect	= chtls_disconnect;
	chtls_cpl_prot.destroy		= chtls_destroy_sock;
	chtls_cpl_prot.shutdown		= chtls_shutdown;
	chtls_cpl_prot.sendmsg		= chtls_sendmsg;
	chtls_cpl_prot.sendpage		= chtls_sendpage;
	chtls_cpl_prot.recvmsg		= chtls_recvmsg;
	chtls_cpl_prot.setsockopt	= chtls_setsockopt;
	chtls_cpl_prot.getsockopt	= chtls_getsockopt;
#if IS_ENABLED(CONFIG_IPV6)
	chtls_cpl_protv6		= chtls_cpl_prot;
	chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
			   &tcpv6_prot, PF_INET6);
#endif
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHTLS_DRV_VERSION);