/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
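/*
 * Layout of the buffer, from lowest to highest address:
 *
 *   [ ESN high bits (optional) ][ IV ][ aead_request + request ctx ][ scatterlist[] ]
 *
 * The esp_tmp_seqhi(), esp_tmp_iv(), esp_tmp_req() and esp_req_sg() helpers
 * below recover the individual regions from the base pointer.
 */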
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	__be32 *seqhi;
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	struct xfrm_state *x = skb_dst(skb)->xfrm;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

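/*
 * Build the ESP trailer (RFC 4303): optional TFC padding (zero-filled),
 * self-describing padding bytes 1, 2, 3, ..., the pad length byte and the
 * next header byte.  The ICV is appended separately by the AEAD transform.
 */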
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct sk_buff *trailer;
	struct page *page;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int tailen;
	u8 *iv;
	u8 *tail;
	u8 *vaddr;
	__be32 *seqhi;
	__be64 seqno;
	__u8 proto = *skb_mac_header(skb);

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;
	tailen = tfclen + plen + alen;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	*skb_mac_header(skb) = IPPROTO_ESP;
	esph = ip_esp_hdr(skb);

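	/* Three ways to add the trailer and build the scatterlists:
	 * write it into existing tailroom (skip_cow), append it via the
	 * per-state page fragment and encrypt into freshly allocated pages
	 * (skip_cow2), or fall back to copying with skb_cow_data() (cow).
	 */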
	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, tfclen, plen, proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				atomic_add(tailen, &sk->sk_wmem_alloc);

			skb_push(skb, -skb_network_offset(skb));

			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
			esph->spi = x->id.spi;

			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
			if (!tmp) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}
			seqhi = esp_tmp_seqhi(tmp);
			iv = esp_tmp_iv(aead, tmp, seqhilen);
			req = esp_tmp_req(aead, iv);
			sg = esp_req_sg(aead, req);
			dsg = &sg[nfrags];

			esph = esp_output_set_esn(skb, esph, seqhi);

			sg_init_table(sg, nfrags);
			skb_to_sgvec(skb, sg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

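			/* Encrypt into freshly allocated pages rather than in
			 * place; the original frag pages are dropped later by
			 * esp_ssg_unref() once the request completes.
			 */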
			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}

			skb_shinfo(skb)->nr_frags = 1;

			page = pfrag->page;
			get_page(page);
			/* replace page frags in skb with new page */
			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
			pfrag->offset = pfrag->offset + allocsize;

			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
			skb_to_sgvec(skb, dsg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			spin_unlock_bh(&x->lock);

			goto skip_cow2;
		}
	}

cow:
	err = skb_cow_data(skb, tailen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	tail = skb_tail_pointer(trailer);
	esph = ip_esp_hdr(skb);

skip_cow:
	esp_output_fill_trailer(tail, tfclen, plen, proto);

	pskb_put(skb, trailer, clen - skb->len + alen);
	skb_push(skb, -skb_network_offset(skb));

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esph->spi = x->id.spi;

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
	dsg = sg;

	esph = esp_output_set_esn(skb, esph, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

skip_cow2:
	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

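	/* The IV carries the 64-bit sequence number: zero-fill it and copy the
	 * big-endian sequence counter into its trailing bytes.
	 */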
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

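	/* The two bytes just before the ICV are the pad length and the
	 * next header field of the ESP trailer.
	 */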
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

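	/* Map the skb directly into the scatterlist when it is neither cloned
	 * nor chained into a frag list; otherwise fall back to skb_cow_data().
	 */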
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

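	/* Decrypt in place: source and destination scatterlists are identical. */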
	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

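/*
 * Largest payload that fits into an ESP packet of the given MTU: reserve
 * room for the ESP header/IV and the ICV, round down to the cipher block
 * size and keep two bytes for the trailer's pad length and next header
 * fields.
 */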
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

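/*
 * ICMPv6 error handler: for Packet Too Big and Redirect messages quoting an
 * ESP packet, look up the offending SA and update the cached route or PMTU.
 */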
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

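	/* The authenc() key blob is an rtattr carrying the encryption key
	 * length, followed by the raw authentication key and then the raw
	 * encryption key.
	 */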
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);