/* ssl/ssl_ciph.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 * 
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 * 
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from 
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 * 
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * 
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#include <stdio.h>
#include <openssl/objects.h>
#include <openssl/comp.h>
#include "ssl_locl.h"

#define SSL_ENC_DES_IDX		0
#define SSL_ENC_3DES_IDX	1
#define SSL_ENC_RC4_IDX		2
#define SSL_ENC_RC2_IDX		3
#define SSL_ENC_IDEA_IDX	4
#define SSL_ENC_eFZA_IDX	5
#define SSL_ENC_NULL_IDX	6
#define SSL_ENC_AES128_IDX	7
#define SSL_ENC_AES256_IDX	8
#define SSL_ENC_NUM_IDX		9

static const EVP_CIPHER *ssl_cipher_methods[SSL_ENC_NUM_IDX]={
	NULL,NULL,NULL,NULL,NULL,NULL,
	};

static STACK_OF(SSL_COMP) *ssl_comp_methods=NULL;

#define SSL_MD_MD5_IDX	0
#define SSL_MD_SHA1_IDX	1
#define SSL_MD_NUM_IDX	2
static const EVP_MD *ssl_digest_methods[SSL_MD_NUM_IDX]={
	NULL,NULL,
	};

#define CIPHER_ADD	1
#define CIPHER_KILL	2
#define CIPHER_DEL	3
#define CIPHER_ORD	4
#define CIPHER_SPECIAL	5

typedef struct cipher_order_st
	{
	SSL_CIPHER *cipher;
	int active;
	int dead;
	struct cipher_order_st *next,*prev;
	} CIPHER_ORDER;
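/*
 * Editorial note: ssl_create_cipher_list() below allocates one CIPHER_ORDER
 * entry per compiled-in cipher and threads the entries into a doubly linked
 * list; rule processing then only re-links entries and toggles 'active',
 * it never copies SSL_CIPHER structures.
 */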

static const SSL_CIPHER cipher_aliases[]={
	/* Don't include eNULL unless specifically enabled.
	 * Similarly, don't include AES in ALL because these ciphers are not yet official. */
	{0,SSL_TXT_ALL, 0,SSL_ALL & ~SSL_eNULL & ~SSL_AES, SSL_ALL ,0,0,0,SSL_ALL,SSL_ALL}, /* must be first */
        {0,SSL_TXT_kKRB5,0,SSL_kKRB5,0,0,0,0,SSL_MKEY_MASK,0},  /* VRS Kerberos5 */
	{0,SSL_TXT_kRSA,0,SSL_kRSA,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kDHr,0,SSL_kDHr,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kDHd,0,SSL_kDHd,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kEDH,0,SSL_kEDH,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kFZA,0,SSL_kFZA,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_DH,	0,SSL_DH,    0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_EDH,	0,SSL_EDH,   0,0,0,0,SSL_MKEY_MASK|SSL_AUTH_MASK,0},

	{0,SSL_TXT_aKRB5,0,SSL_aKRB5,0,0,0,0,SSL_AUTH_MASK,0},  /* VRS Kerberos5 */
	{0,SSL_TXT_aRSA,0,SSL_aRSA,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aDSS,0,SSL_aDSS,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aFZA,0,SSL_aFZA,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aNULL,0,SSL_aNULL,0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aDH, 0,SSL_aDH,   0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_DSS,	0,SSL_DSS,   0,0,0,0,SSL_AUTH_MASK,0},

	{0,SSL_TXT_DES,	0,SSL_DES,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_3DES,0,SSL_3DES,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_RC4,	0,SSL_RC4,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_RC2,	0,SSL_RC2,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_IDEA,0,SSL_IDEA,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_eNULL,0,SSL_eNULL,0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_eFZA,0,SSL_eFZA,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_AES,	0,SSL_AES,   0,0,0,0,SSL_ENC_MASK,0},

	{0,SSL_TXT_MD5,	0,SSL_MD5,   0,0,0,0,SSL_MAC_MASK,0},
	{0,SSL_TXT_SHA1,0,SSL_SHA1,  0,0,0,0,SSL_MAC_MASK,0},
	{0,SSL_TXT_SHA,	0,SSL_SHA,   0,0,0,0,SSL_MAC_MASK,0},

	{0,SSL_TXT_NULL,0,SSL_NULL,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_KRB5,0,SSL_KRB5,  0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_RSA,	0,SSL_RSA,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_ADH,	0,SSL_ADH,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_FZA,	0,SSL_FZA,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK|SSL_ENC_MASK,0},

	{0,SSL_TXT_SSLV2, 0,SSL_SSLV2, 0,0,0,0,SSL_SSL_MASK,0},
	{0,SSL_TXT_SSLV3, 0,SSL_SSLV3, 0,0,0,0,SSL_SSL_MASK,0},
	{0,SSL_TXT_TLSV1, 0,SSL_TLSV1, 0,0,0,0,SSL_SSL_MASK,0},

	{0,SSL_TXT_EXP   ,0, 0,SSL_EXPORT, 0,0,0,0,SSL_EXP_MASK},
	{0,SSL_TXT_EXPORT,0, 0,SSL_EXPORT, 0,0,0,0,SSL_EXP_MASK},
	{0,SSL_TXT_EXP40, 0, 0, SSL_EXP40, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_EXP56, 0, 0, SSL_EXP56, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_LOW,   0, 0,   SSL_LOW, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_MEDIUM,0, 0,SSL_MEDIUM, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_HIGH,  0, 0,  SSL_HIGH, 0,0,0,0,SSL_STRONG_MASK},
	};

static int init_ciphers=1;

static void load_ciphers(void)
	{
	init_ciphers=0;
	ssl_cipher_methods[SSL_ENC_DES_IDX]= 
		EVP_get_cipherbyname(SN_des_cbc);
	ssl_cipher_methods[SSL_ENC_3DES_IDX]=
		EVP_get_cipherbyname(SN_des_ede3_cbc);
	ssl_cipher_methods[SSL_ENC_RC4_IDX]=
		EVP_get_cipherbyname(SN_rc4);
	ssl_cipher_methods[SSL_ENC_RC2_IDX]= 
		EVP_get_cipherbyname(SN_rc2_cbc);
	ssl_cipher_methods[SSL_ENC_IDEA_IDX]= 
		EVP_get_cipherbyname(SN_idea_cbc);
	ssl_cipher_methods[SSL_ENC_AES128_IDX]=
	  EVP_get_cipherbyname(SN_aes_128_cbc);
	ssl_cipher_methods[SSL_ENC_AES256_IDX]=
	  EVP_get_cipherbyname(SN_aes_256_cbc);

	ssl_digest_methods[SSL_MD_MD5_IDX]=
		EVP_get_digestbyname(SN_md5);
	ssl_digest_methods[SSL_MD_SHA1_IDX]=
		EVP_get_digestbyname(SN_sha1);
	}
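/*
 * Editorial note (an assumption about typical usage, not part of the original
 * source): EVP_get_cipherbyname()/EVP_get_digestbyname() above only find
 * algorithms that were registered beforehand, e.g. via SSL_library_init().
 * Anything left unregistered keeps a NULL table slot here, and
 * ssl_cipher_get_disabled() below then masks the matching ciphersuites out.
 */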

int ssl_cipher_get_evp(SSL_SESSION *s, const EVP_CIPHER **enc,
	     const EVP_MD **md, SSL_COMP **comp)
	{
	int i;
	SSL_CIPHER *c;

	c=s->cipher;
	if (c == NULL) return(0);
	if (comp != NULL)
		{
		SSL_COMP ctmp;

		if (s->compress_meth == 0)
			*comp=NULL;
		else if (ssl_comp_methods == NULL)
			{
			/* bad */
			*comp=NULL;
			}
		else
			{

			ctmp.id=s->compress_meth;
			i=sk_SSL_COMP_find(ssl_comp_methods,&ctmp);
			if (i >= 0)
				*comp=sk_SSL_COMP_value(ssl_comp_methods,i);
			else
				*comp=NULL;
			}
		}

	if ((enc == NULL) || (md == NULL)) return(0);

	switch (c->algorithms & SSL_ENC_MASK)
		{
	case SSL_DES:
		i=SSL_ENC_DES_IDX;
		break;
	case SSL_3DES:
		i=SSL_ENC_3DES_IDX;
		break;
	case SSL_RC4:
		i=SSL_ENC_RC4_IDX;
		break;
	case SSL_RC2:
		i=SSL_ENC_RC2_IDX;
		break;
	case SSL_IDEA:
		i=SSL_ENC_IDEA_IDX;
		break;
	case SSL_eNULL:
		i=SSL_ENC_NULL_IDX;
		break;
	case SSL_AES:
		switch(c->alg_bits)
			{
		case 128: i=SSL_ENC_AES128_IDX; break;
		case 256: i=SSL_ENC_AES256_IDX; break;
		default: i=-1; break;
			}
		break;
	default:
		i= -1;
		break;
		}

	if ((i < 0) || (i >= SSL_ENC_NUM_IDX))
		*enc=NULL;
	else
		{
		if (i == SSL_ENC_NULL_IDX)
			*enc=EVP_enc_null();
		else
			*enc=ssl_cipher_methods[i];
		}

	switch (c->algorithms & SSL_MAC_MASK)
		{
	case SSL_MD5:
		i=SSL_MD_MD5_IDX;
		break;
	case SSL_SHA1:
		i=SSL_MD_SHA1_IDX;
		break;
	default:
		i= -1;
		break;
		}
	if ((i < 0) || (i >= SSL_MD_NUM_IDX))
		*md=NULL;
	else
		*md=ssl_digest_methods[i];

	if ((*enc != NULL) && (*md != NULL))
		return(1);
	else
		return(0);
	}
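/*
 * Editorial usage sketch (illustrative only, not part of the original source):
 * the key-setup code is expected to call ssl_cipher_get_evp() on the
 * negotiated session to fetch the record-layer primitives, roughly:
 *
 *	const EVP_CIPHER *c;
 *	const EVP_MD *hash;
 *	SSL_COMP *comp;
 *	if (!ssl_cipher_get_evp(s->session, &c, &hash, &comp))
 *		goto err;	[cipher or digest not compiled in/registered]
 */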

#define ITEM_SEP(a) \
	(((a) == ':') || ((a) == ' ') || ((a) == ';') || ((a) == ','))

static void ll_append_tail(CIPHER_ORDER **head, CIPHER_ORDER *curr,
	     CIPHER_ORDER **tail)
	{
	if (curr == *tail) return;
	if (curr == *head)
		*head=curr->next;
	if (curr->prev != NULL)
		curr->prev->next=curr->next;
	if (curr->next != NULL) /* should always be true */
		curr->next->prev=curr->prev;
	(*tail)->next=curr;
	curr->prev= *tail;
	curr->next=NULL;
	*tail=curr;
	}

static unsigned long ssl_cipher_get_disabled(void)
	{
	unsigned long mask;

	mask = SSL_kFZA;
#ifdef OPENSSL_NO_RSA
	mask |= SSL_aRSA|SSL_kRSA;
#endif
#ifdef OPENSSL_NO_DSA
	mask |= SSL_aDSS;
#endif
#ifdef OPENSSL_NO_DH
	mask |= SSL_kDHr|SSL_kDHd|SSL_kEDH|SSL_aDH;
#endif
#ifdef OPENSSL_NO_KRB5
	mask |= SSL_kKRB5|SSL_aKRB5;
#endif

#ifdef SSL_FORBID_ENULL
	mask |= SSL_eNULL;
#endif

	mask |= (ssl_cipher_methods[SSL_ENC_DES_IDX ] == NULL) ? SSL_DES :0;
	mask |= (ssl_cipher_methods[SSL_ENC_3DES_IDX] == NULL) ? SSL_3DES:0;
	mask |= (ssl_cipher_methods[SSL_ENC_RC4_IDX ] == NULL) ? SSL_RC4 :0;
	mask |= (ssl_cipher_methods[SSL_ENC_RC2_IDX ] == NULL) ? SSL_RC2 :0;
	mask |= (ssl_cipher_methods[SSL_ENC_IDEA_IDX] == NULL) ? SSL_IDEA:0;
	mask |= (ssl_cipher_methods[SSL_ENC_eFZA_IDX] == NULL) ? SSL_eFZA:0;
	mask |= (ssl_cipher_methods[SSL_ENC_AES128_IDX] == NULL) ? SSL_AES:0;

	mask |= (ssl_digest_methods[SSL_MD_MD5_IDX ] == NULL) ? SSL_MD5 :0;
	mask |= (ssl_digest_methods[SSL_MD_SHA1_IDX] == NULL) ? SSL_SHA1:0;

	return(mask);
	}

static void ssl_cipher_collect_ciphers(const SSL_METHOD *ssl_method,
		int num_of_ciphers, unsigned long mask, CIPHER_ORDER *list,
		CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p)
	{
	int i, list_num;
	SSL_CIPHER *c;

	/*
	 * We have num_of_ciphers descriptions compiled in, depending on the
	 * method selected (SSLv2 and/or SSLv3, TLSv1 etc).
	 * These will later be sorted in a linked list with at most num
	 * entries.
	 */

	/* Get the initial list of ciphers */
	list_num = 0;	/* actual count of ciphers */
	for (i = 0; i < num_of_ciphers; i++)
		{
		c = ssl_method->get_cipher(i);
		/* drop those that use algorithms which are not available */
		if ((c != NULL) && c->valid && !(c->algorithms & mask))
			{
			list[list_num].cipher = c;
			list[list_num].next = NULL;
			list[list_num].prev = NULL;
			list[list_num].active = 0;
			list_num++;
#ifdef KSSL_DEBUG
			printf("\t%d: %s %lx %lx\n",i,c->name,c->id,c->algorithms);
#endif	/* KSSL_DEBUG */
			/*
			if (!sk_push(ca_list,(char *)c)) goto err;
			*/
			}
		}

	/*
	 * Prepare linked list from list entries
	 */	
	for (i = 1; i < list_num - 1; i++)
		{
		list[i].prev = &(list[i-1]);
		list[i].next = &(list[i+1]);
		}
	if (list_num > 0)
		{
		(*head_p) = &(list[0]);
		(*head_p)->prev = NULL;
		(*head_p)->next = &(list[1]);
		(*tail_p) = &(list[list_num - 1]);
		(*tail_p)->prev = &(list[list_num - 2]);
		(*tail_p)->next = NULL;
		}
	}

static void ssl_cipher_collect_aliases(SSL_CIPHER **ca_list,
			int num_of_group_aliases, unsigned long mask,
			CIPHER_ORDER *head)
	{
	CIPHER_ORDER *ciph_curr;
	SSL_CIPHER **ca_curr;
	int i;

	/*
	 * First, add the real ciphers as already collected
	 */
	ciph_curr = head;
	ca_curr = ca_list;
	while (ciph_curr != NULL)
		{
		*ca_curr = ciph_curr->cipher;
		ca_curr++;
		ciph_curr = ciph_curr->next;
		}

	/*
	 * Now we add the available ones from the cipher_aliases[] table.
	 * They represent either an algorithm, that must be fully
	 * supported (not match any bit in mask) or represent a cipher
	 * strength value (will be added in any case because algorithms=0).
	 */
	for (i = 0; i < num_of_group_aliases; i++)
		{
		if ((i == 0) ||		/* always fetch "ALL" */
		    !(cipher_aliases[i].algorithms & mask))
			{
			*ca_curr = (SSL_CIPHER *)(cipher_aliases + i);
			ca_curr++;
			}
		}

	*ca_curr = NULL;	/* end of list */
	}

static void ssl_cipher_apply_rule(unsigned long algorithms, unsigned long mask,
		unsigned long algo_strength, unsigned long mask_strength,
		int rule, int strength_bits, CIPHER_ORDER *list,
		CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p)
	{
	CIPHER_ORDER *head, *tail, *curr, *curr2, *tail2;
	SSL_CIPHER *cp;
	unsigned long ma, ma_s;

#ifdef CIPHER_DEBUG
	printf("Applying rule %d with %08lx %08lx %08lx %08lx (%d)\n",
		rule, algorithms, mask, algo_strength, mask_strength,
		strength_bits);
#endif

	curr = head = *head_p;
	curr2 = head;
	tail2 = tail = *tail_p;
	for (;;)
		{
		if ((curr == NULL) || (curr == tail2)) break;
		curr = curr2;
		curr2 = curr->next;

		cp = curr->cipher;

		/*
		 * Selection criteria is either the number of strength_bits
		 * or the algorithm used.
		 */
		if (strength_bits == -1)
			{
			ma = mask & cp->algorithms;
			ma_s = mask_strength & cp->algo_strength;

#ifdef CIPHER_DEBUG
			printf("\nName: %s:\nAlgo = %08lx Algo_strength = %08lx\nMask = %08lx Mask_strength %08lx\n", cp->name, cp->algorithms, cp->algo_strength, mask, mask_strength);
			printf("ma = %08lx ma_s %08lx, ma&algo=%08lx, ma_s&algos=%08lx\n", ma, ma_s, ma&algorithms, ma_s&algo_strength);
#endif
			/*
			 * Select: if none of the mask bits is matched by the
			 * cipher, or if some of the matched bits are not also
			 * set in the rule, the selection does not apply.
			 */
			if (((ma == 0) && (ma_s == 0)) ||
			    ((ma & algorithms) != ma) ||
			    ((ma_s & algo_strength) != ma_s))
				continue; /* does not apply */
			}
		else if (strength_bits != cp->strength_bits)
			continue;	/* does not apply */

#ifdef CIPHER_DEBUG
		printf("Action = %d\n", rule);
#endif

		/* add the cipher if it has not been added yet. */
		if (rule == CIPHER_ADD)
			{
			if (!curr->active)
				{
				ll_append_tail(&head, curr, &tail);
				curr->active = 1;
				}
			}
		/* Move the added cipher to this location */
		else if (rule == CIPHER_ORD)
			{
			if (curr->active)
				{
				ll_append_tail(&head, curr, &tail);
				}
			}
		else if	(rule == CIPHER_DEL)
			curr->active = 0;
		else if (rule == CIPHER_KILL)
			{
			if (head == curr)
				head = curr->next;
			else
				curr->prev->next = curr->next;
			if (tail == curr)
				tail = curr->prev;
			curr->active = 0;
			if (curr->next != NULL)
				curr->next->prev = curr->prev;
			if (curr->prev != NULL)
				curr->prev->next = curr->next;
			curr->next = NULL;
			curr->prev = NULL;
			}
		}

	*head_p = head;
	*tail_p = tail;
	}
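/*
 * Editorial note: CIPHER_DEL ('-') only clears 'active', so a later rule can
 * re-add the entry; CIPHER_KILL ('!') additionally unlinks the entry from the
 * list, so no later rule can bring it back.
 */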

static int ssl_cipher_strength_sort(CIPHER_ORDER *list, CIPHER_ORDER **head_p,
				     CIPHER_ORDER **tail_p)
	{
	int max_strength_bits, i, *number_uses;
	CIPHER_ORDER *curr;

	/*
	 * This routine sorts the ciphers with descending strength. The sorting
	 * must keep the pre-sorted sequence, so we apply the normal sorting
	 * routine as '+' movement to the end of the list.
	 */
	max_strength_bits = 0;
	curr = *head_p;
	while (curr != NULL)
		{
		if (curr->active &&
		    (curr->cipher->strength_bits > max_strength_bits))
		    max_strength_bits = curr->cipher->strength_bits;
		curr = curr->next;
		}

	number_uses = OPENSSL_malloc((max_strength_bits + 1) * sizeof(int));
	if (!number_uses)
	{
		SSLerr(SSL_F_SSL_CIPHER_STRENGTH_SORT,ERR_R_MALLOC_FAILURE);
		return(0);
	}
	memset(number_uses, 0, (max_strength_bits + 1) * sizeof(int));

	/*
	 * Now find the strength_bits values actually used
	 */
	curr = *head_p;
	while (curr != NULL)
		{
		if (curr->active)
			number_uses[curr->cipher->strength_bits]++;
		curr = curr->next;
		}
	/*
	 * Go through the list of used strength_bits values in descending
	 * order.
	 */
	for (i = max_strength_bits; i >= 0; i--)
		if (number_uses[i] > 0)
			ssl_cipher_apply_rule(0, 0, 0, 0, CIPHER_ORD, i,
					list, head_p, tail_p);

	OPENSSL_free(number_uses);
	return(1);
	}

static int ssl_cipher_process_rulestr(const char *rule_str,
		CIPHER_ORDER *list, CIPHER_ORDER **head_p,
		CIPHER_ORDER **tail_p, SSL_CIPHER **ca_list)
	{
	unsigned long algorithms, mask, algo_strength, mask_strength;
	const char *l, *start, *buf;
	int j, multi, found, rule, retval, ok, buflen;
	char ch;

	retval = 1;
	l = rule_str;
	for (;;)
		{
		ch = *l;

		if (ch == '\0')
			break;		/* done */
		if (ch == '-')
			{ rule = CIPHER_DEL; l++; }
		else if (ch == '+')
			{ rule = CIPHER_ORD; l++; }
		else if (ch == '!')
			{ rule = CIPHER_KILL; l++; }
		else if (ch == '@')
			{ rule = CIPHER_SPECIAL; l++; }
		else
			{ rule = CIPHER_ADD; }

		if (ITEM_SEP(ch))
			{
			l++;
			continue;
			}

		algorithms = mask = algo_strength = mask_strength = 0;

		start=l;
		for (;;)
			{
			ch = *l;
			buf = l;
			buflen = 0;
#ifndef CHARSET_EBCDIC
			while (	((ch >= 'A') && (ch <= 'Z')) ||
				((ch >= '0') && (ch <= '9')) ||
				((ch >= 'a') && (ch <= 'z')) ||
				 (ch == '-'))
#else
			while (	isalnum(ch) || (ch == '-'))
#endif
				 {
				 ch = *(++l);
				 buflen++;
				 }

			if (buflen == 0)
				{
				/*
				 * We hit something we cannot deal with: it is
				 * neither a command nor a separator nor
				 * alphanumeric, so we call this an error.
				 */
				SSLerr(SSL_F_SSL_CIPHER_PROCESS_RULESTR,
				       SSL_R_INVALID_COMMAND);
				retval = found = 0;
				l++;
				break;
				}

			if (rule == CIPHER_SPECIAL)
				{
				found = 0; /* unused -- avoid compiler warning */
				break;	/* special treatment */
				}

			/* check for multi-part specification */
			if (ch == '+')
				{
				multi=1;
				l++;
				}
			else
				multi=0;

			/*
			 * Now search for the cipher alias in the ca_list. Be careful
			 * with the strncmp, because the "buflen" limitation
			 * will make the rule "ADH:SOME" and the cipher
			 * "ADH-MY-CIPHER" look like a match for buflen=3.
			 * So additionally check whether the cipher name found
			 * has the correct length. We can save a strlen() call:
			 * just checking for the '\0' at the right place is
			 * sufficient, we have to strncmp() anyway.
			 */
			 j = found = 0;
			 while (ca_list[j])
				{
				if ((ca_list[j]->name[buflen] == '\0') &&
				    !strncmp(buf, ca_list[j]->name, buflen))
					{
					found = 1;
					break;
					}
				else
					j++;
				}
			if (!found)
				break;	/* ignore this entry */

			algorithms |= ca_list[j]->algorithms;
			mask |= ca_list[j]->mask;
			algo_strength |= ca_list[j]->algo_strength;
			mask_strength |= ca_list[j]->mask_strength;

			if (!multi) break;
			}

		/*
		 * Ok, we have the rule, now apply it
		 */
		if (rule == CIPHER_SPECIAL)
			{	/* special command */
			ok = 0;
			if ((buflen == 8) &&
				!strncmp(buf, "STRENGTH", 8))
				ok = ssl_cipher_strength_sort(list,
					head_p, tail_p);
			else
				SSLerr(SSL_F_SSL_CIPHER_PROCESS_RULESTR,
					SSL_R_INVALID_COMMAND);
			if (ok == 0)
				retval = 0;
			/*
			 * We do not support any "multi" options
			 * together with "@", so throw away the
			 * rest of the command, if any left, until
			 * end or ':' is found.
			 */
			while ((*l != '\0') && ITEM_SEP(*l))
				l++;
			}
		else if (found)
			{
			ssl_cipher_apply_rule(algorithms, mask,
				algo_strength, mask_strength, rule, -1,
				list, head_p, tail_p);
			}
		else
			{
			while ((*l != '\0') && ITEM_SEP(*l))
				l++;
			}
		if (*l == '\0') break; /* done */
		}

	return(retval);
	}
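/*
 * Editorial example (derived from the code above, not part of the original
 * source): a rule string such as "RC4:+HIGH:-LOW:!aNULL:@STRENGTH" is applied
 * left to right as CIPHER_ADD (RC4), CIPHER_ORD (+HIGH), CIPHER_DEL (-LOW),
 * CIPHER_KILL (!aNULL) and the CIPHER_SPECIAL command @STRENGTH, which calls
 * ssl_cipher_strength_sort().  A '+' inside a word, as in "RC4+RSA", instead
 * builds one multi-part rule matching only ciphers that satisfy both parts.
 */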

STACK_OF(SSL_CIPHER) *ssl_create_cipher_list(const SSL_METHOD *ssl_method,
		STACK_OF(SSL_CIPHER) **cipher_list,
		STACK_OF(SSL_CIPHER) **cipher_list_by_id,
		const char *rule_str)
	{
	int ok, num_of_ciphers, num_of_alias_max, num_of_group_aliases;
	unsigned long disabled_mask;
	STACK_OF(SSL_CIPHER) *cipherstack;
	const char *rule_p;
	CIPHER_ORDER *list = NULL, *head = NULL, *tail = NULL, *curr;
	SSL_CIPHER **ca_list = NULL;

	/*
	 * Return with error if nothing to do.
	 */
	if (rule_str == NULL) return(NULL);

	if (init_ciphers) load_ciphers();

	/*
	 * To reduce the work to do we only want to process the compiled
	 * in algorithms, so we first get the mask of disabled ciphers.
	 */
	disabled_mask = ssl_cipher_get_disabled();

	/*
	 * Now we have to collect the available ciphers from the compiled
	 * in ciphers. We cannot get more than the number compiled in, so
	 * it is used for allocation.
	 */
	num_of_ciphers = ssl_method->num_ciphers();
#ifdef KSSL_DEBUG
	printf("ssl_create_cipher_list() for %d ciphers\n", num_of_ciphers);
#endif    /* KSSL_DEBUG */
	list = (CIPHER_ORDER *)OPENSSL_malloc(sizeof(CIPHER_ORDER) * num_of_ciphers);
	if (list == NULL)
		{
		SSLerr(SSL_F_SSL_CREATE_CIPHER_LIST,ERR_R_MALLOC_FAILURE);
		return(NULL);	/* Failure */
		}

	ssl_cipher_collect_ciphers(ssl_method, num_of_ciphers, disabled_mask,
				   list, &head, &tail);

	/*
	 * We also need cipher aliases for selecting based on the rule_str.
	 * There might be two types of entries in the rule_str: 1) names
	 * of ciphers themselves 2) aliases for groups of ciphers.
	 * For 1) we need the available ciphers and for 2) the cipher
	 * groups of cipher_aliases added together in one list (otherwise
	 * we would be happy with just the cipher_aliases table).
	 */
	num_of_group_aliases = sizeof(cipher_aliases) / sizeof(SSL_CIPHER);
	num_of_alias_max = num_of_ciphers + num_of_group_aliases + 1;
	ca_list =
		(SSL_CIPHER **)OPENSSL_malloc(sizeof(SSL_CIPHER *) * num_of_alias_max);
	if (ca_list == NULL)
		{
		OPENSSL_free(list);
		SSLerr(SSL_F_SSL_CREATE_CIPHER_LIST,ERR_R_MALLOC_FAILURE);
		return(NULL);	/* Failure */
		}
	ssl_cipher_collect_aliases(ca_list, num_of_group_aliases, disabled_mask,
				   head);

	/*
	 * If the rule_string begins with DEFAULT, apply the default rule
	 * before using the (possibly available) additional rules.
	 */
	ok = 1;
	rule_p = rule_str;
	if (strncmp(rule_str,"DEFAULT",7) == 0)
		{
		ok = ssl_cipher_process_rulestr(SSL_DEFAULT_CIPHER_LIST,
			list, &head, &tail, ca_list);
		rule_p += 7;
		if (*rule_p == ':')
			rule_p++;
		}

	if (ok && (strlen(rule_p) > 0))
		ok = ssl_cipher_process_rulestr(rule_p, list, &head, &tail,
						ca_list);

	OPENSSL_free(ca_list);	/* Not needed anymore */

	if (!ok)
		{	/* Rule processing failure */
		OPENSSL_free(list);
		return(NULL);
		}
	/*
	 * Allocate new "cipherstack" for the result, return with error
	 * if we cannot get one.
	 */
	if ((cipherstack = sk_SSL_CIPHER_new_null()) == NULL)
		{
		OPENSSL_free(list);
		return(NULL);
		}

	/*
	 * The cipher selection for the list is done. The ciphers are added
	 * to the resulting precedence to the STACK_OF(SSL_CIPHER).
	 */
	for (curr = head; curr != NULL; curr = curr->next)
		{
		if (curr->active)
			{
			sk_SSL_CIPHER_push(cipherstack, curr->cipher);
#ifdef CIPHER_DEBUG
			printf("<%s>\n",curr->cipher->name);
#endif
			}
		}
	OPENSSL_free(list);	/* Not needed any longer */

	/*
	 * The following passage is a little bit odd. If pointer variables
	 * were supplied to hold STACK_OF(SSL_CIPHER) return information,
	 * the old memory pointed to is free()ed. Then, however, the
	 * cipher_list entry will be assigned just a copy of the returned
	 * cipher stack. For cipher_list_by_id a copy of the cipher stack
	 * will be created. See next comment...
	 */
	if (cipher_list != NULL)
		{
		if (*cipher_list != NULL)
			sk_SSL_CIPHER_free(*cipher_list);
		*cipher_list = cipherstack;
		}

	if (cipher_list_by_id != NULL)
		{
		if (*cipher_list_by_id != NULL)
			sk_SSL_CIPHER_free(*cipher_list_by_id);
		*cipher_list_by_id = sk_SSL_CIPHER_dup(cipherstack);
		}

	/*
	 * Now it is getting really strange. If something failed during
	 * the previous pointer assignment or if one of the pointers was
	 * not requested, the error condition is met. That might be
	 * discussable. The strange thing is however that in this case
	 * the memory "ret" pointed to is "free()ed" and hence the pointer
	 * cipher_list becomes wild. The memory reserved for
	 * cipher_list_by_id however is not "free()ed" and stays intact.
	 */
	if (	(cipher_list_by_id == NULL) ||
		(*cipher_list_by_id == NULL) ||
		(cipher_list == NULL) ||
		(*cipher_list == NULL))
		{
		sk_SSL_CIPHER_free(cipherstack);
		return(NULL);
		}

	sk_SSL_CIPHER_set_cmp_func(*cipher_list_by_id,ssl_cipher_ptr_id_cmp);

	return(cipherstack);
	}
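/*
 * Editorial usage sketch (compiled out; illustrative only).  Applications do
 * not call ssl_create_cipher_list() directly: SSL_CTX_set_cipher_list() and
 * SSL_set_cipher_list() are the public wrappers that end up here.  The rule
 * string below is just an example.
 */
#if 0
static int example_set_cipher_list(SSL_CTX *ctx)
	{
	/* returns 0 if no cipher could be selected from the rule string */
	return SSL_CTX_set_cipher_list(ctx, "DEFAULT:!eNULL:!aNULL:@STRENGTH");
	}
#endif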

char *SSL_CIPHER_description(SSL_CIPHER *cipher, char *buf, int len)
	{
	int is_export,pkl,kl;
	char *ver,*exp;
	char *kx,*au,*enc,*mac;
	unsigned long alg,alg2,alg_s;
#ifdef KSSL_DEBUG
	static char *format="%-23s %s Kx=%-8s Au=%-4s Enc=%-9s Mac=%-4s%s AL=%lx\n";
#else
	static char *format="%-23s %s Kx=%-8s Au=%-4s Enc=%-9s Mac=%-4s%s\n";
#endif /* KSSL_DEBUG */

	alg=cipher->algorithms;
	alg_s=cipher->algo_strength;
	alg2=cipher->algorithm2;

	is_export=SSL_C_IS_EXPORT(cipher);
	pkl=SSL_C_EXPORT_PKEYLENGTH(cipher);
	kl=SSL_C_EXPORT_KEYLENGTH(cipher);
	exp=is_export?" export":"";

	if (alg & SSL_SSLV2)
		ver="SSLv2";
	else if (alg & SSL_SSLV3)
		ver="SSLv3";
	else
		ver="unknown";

	switch (alg&SSL_MKEY_MASK)
		{
	case SSL_kRSA:
		kx=is_export?(pkl == 512 ? "RSA(512)" : "RSA(1024)"):"RSA";
		break;
	case SSL_kDHr:
		kx="DH/RSA";
		break;
	case SSL_kDHd:
		kx="DH/DSS";
		break;
        case SSL_kKRB5:         /* VRS */
        case SSL_KRB5:          /* VRS */
            kx="KRB5";
            break;
	case SSL_kFZA:
		kx="Fortezza";
		break;
	case SSL_kEDH:
		kx=is_export?(pkl == 512 ? "DH(512)" : "DH(1024)"):"DH";
		break;
	default:
		kx="unknown";
		}

	switch (alg&SSL_AUTH_MASK)
		{
	case SSL_aRSA:
		au="RSA";
		break;
	case SSL_aDSS:
		au="DSS";
		break;
	case SSL_aDH:
		au="DH";
		break;
        case SSL_aKRB5:         /* VRS */
        case SSL_KRB5:          /* VRS */
            au="KRB5";
            break;
	case SSL_aFZA:
	case SSL_aNULL:
		au="None";
		break;
	default:
		au="unknown";
		break;
		}

	switch (alg&SSL_ENC_MASK)
		{
	case SSL_DES:
		enc=(is_export && kl == 5)?"DES(40)":"DES(56)";
		break;
	case SSL_3DES:
		enc="3DES(168)";
		break;
	case SSL_RC4:
		enc=is_export?(kl == 5 ? "RC4(40)" : "RC4(56)")
		  :((alg2&SSL2_CF_8_BYTE_ENC)?"RC4(64)":"RC4(128)");
		break;
	case SSL_RC2:
		enc=is_export?(kl == 5 ? "RC2(40)" : "RC2(56)"):"RC2(128)";
		break;
	case SSL_IDEA:
		enc="IDEA(128)";
		break;
	case SSL_eFZA:
		enc="Fortezza";
		break;
	case SSL_eNULL:
		enc="None";
		break;
	case SSL_AES:
		switch(cipher->strength_bits)
			{
		case 128: enc="AESdraft(128)"; break;
		case 192: enc="AESdraft(192)"; break;
		case 256: enc="AESdraft(256)"; break;
		default: enc="AESdraft(?""?""?)"; break;
			}
		break;
	default:
		enc="unknown";
		break;
		}

	switch (alg&SSL_MAC_MASK)
		{
	case SSL_MD5:
		mac="MD5";
		break;
	case SSL_SHA1:
		mac="SHA1";
		break;
	default:
		mac="unknown";
		break;
		}

	if (buf == NULL)
		{
		len=128;
		buf=OPENSSL_malloc(len);
		if (buf == NULL) return("OPENSSL_malloc Error");
		}
	else if (len < 128)
		return("Buffer too small");

#ifdef KSSL_DEBUG
	BIO_snprintf(buf,len,format,cipher->name,ver,kx,au,enc,mac,exp,alg);
#else
	BIO_snprintf(buf,len,format,cipher->name,ver,kx,au,enc,mac,exp);
#endif /* KSSL_DEBUG */
	return(buf);
	}
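/*
 * Editorial example (approximate, field widths not to scale): the format
 * string above yields "openssl ciphers -v" style lines such as
 *
 *	RC4-SHA      SSLv3 Kx=RSA      Au=RSA Enc=RC4(128) Mac=SHA1
 *	EXP-RC4-MD5  SSLv3 Kx=RSA(512) Au=RSA Enc=RC4(40)  Mac=MD5  export
 */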

char *SSL_CIPHER_get_version(SSL_CIPHER *c)
	{
	int i;

	if (c == NULL) return("(NONE)");
	i=(int)(c->id>>24L);
	if (i == 3)
		return("TLSv1/SSLv3");
	else if (i == 2)
		return("SSLv2");
	else
		return("unknown");
	}

/* return the actual cipher being used */
const char *SSL_CIPHER_get_name(SSL_CIPHER *c)
	{
	if (c != NULL)
		return(c->name);
	return("(NONE)");
	}

/* number of bits for symmetric cipher */
int SSL_CIPHER_get_bits(SSL_CIPHER *c, int *alg_bits)
	{
	int ret=0;

	if (c != NULL)
		{
		if (alg_bits != NULL) *alg_bits = c->alg_bits;
		ret = c->strength_bits;
		}
	return(ret);
	}
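/*
 * Editorial note: strength_bits is the effective key strength, alg_bits the
 * number of bits the algorithm nominally handles; an export 40-bit RC4 suite,
 * for instance, reports strength_bits=40 but alg_bits=128.
 */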

SSL_COMP *ssl3_comp_find(STACK_OF(SSL_COMP) *sk, int n)
	{
	SSL_COMP *ctmp;
	int i,nn;

	if ((n == 0) || (sk == NULL)) return(NULL);
	nn=sk_SSL_COMP_num(sk);
	for (i=0; i<nn; i++)
		{
		ctmp=sk_SSL_COMP_value(sk,i);
		if (ctmp->id == n)
			return(ctmp);
		}
	return(NULL);
	}

static int sk_comp_cmp(const SSL_COMP * const *a,
			const SSL_COMP * const *b)
	{
	return((*a)->id-(*b)->id);
	}

STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void)
	{
	return(ssl_comp_methods);
	}

int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm)
	{
	SSL_COMP *comp;
	STACK_OF(SSL_COMP) *sk;

        if (cm == NULL || cm->type == NID_undef)
                return 1;

	MemCheck_off();
	comp=(SSL_COMP *)OPENSSL_malloc(sizeof(SSL_COMP));
	if (comp == NULL)
		{
		MemCheck_on();
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,ERR_R_MALLOC_FAILURE);
		return(0);
		}
	comp->id=id;
	comp->method=cm;
	if (ssl_comp_methods == NULL)
		sk=ssl_comp_methods=sk_SSL_COMP_new(sk_comp_cmp);
	else
		sk=ssl_comp_methods;
	if ((sk == NULL) || !sk_SSL_COMP_push(sk,comp))
		{
		MemCheck_on();
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,ERR_R_MALLOC_FAILURE);
		return(0);
		}
	else
		{
		MemCheck_on();
		return(1);
		}
	}
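/*
 * Editorial usage sketch (compiled out; the id value 1 is only an example and
 * must match whatever the peer uses for the same method):
 */
#if 0
static void example_register_zlib(void)
	{
	/* COMP_zlib() comes from <openssl/comp.h>, included above */
	if (!SSL_COMP_add_compression_method(1, COMP_zlib()))
		{
		/* registration failed (out of memory) */
		}
	}
#endif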