/* ssl/ssl_ciph.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 * 
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 * 
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from 
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 * 
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * 
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */
/* ====================================================================
 * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
 * ECC cipher suite support in OpenSSL originally developed by 
 * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
 */
#include <stdio.h>
#include <openssl/objects.h>
#include <openssl/comp.h>
#include "ssl_locl.h"

#define SSL_ENC_DES_IDX		0
#define SSL_ENC_3DES_IDX	1
#define SSL_ENC_RC4_IDX		2
#define SSL_ENC_RC2_IDX		3
#define SSL_ENC_IDEA_IDX	4
#define SSL_ENC_eFZA_IDX	5
#define SSL_ENC_NULL_IDX	6
#define SSL_ENC_AES128_IDX	7
#define SSL_ENC_AES256_IDX	8
#define SSL_ENC_NUM_IDX		9

static const EVP_CIPHER *ssl_cipher_methods[SSL_ENC_NUM_IDX]={
	NULL,NULL,NULL,NULL,NULL,NULL,
	};

#define SSL_COMP_NULL_IDX	0
#define SSL_COMP_ZLIB_IDX	1
#define SSL_COMP_NUM_IDX	2

static STACK_OF(SSL_COMP) *ssl_comp_methods=NULL;

#define SSL_MD_MD5_IDX	0
#define SSL_MD_SHA1_IDX	1
#define SSL_MD_NUM_IDX	2
static const EVP_MD *ssl_digest_methods[SSL_MD_NUM_IDX]={
	NULL,NULL,
	};

#define CIPHER_ADD	1
#define CIPHER_KILL	2
#define CIPHER_DEL	3
#define CIPHER_ORD	4
#define CIPHER_SPECIAL	5

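/*
 * CIPHER_ORDER wraps one SSL_CIPHER in a doubly linked list node.  The
 * nodes live in a single array allocated by the caller (co_list); the
 * next/prev links together with separate head/tail pointers define the
 * current ordering while the selection rules are applied, and "active"
 * marks the entries that are currently selected.
 */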
typedef struct cipher_order_st
	{
	SSL_CIPHER *cipher;
	int active;
	int dead;
	struct cipher_order_st *next,*prev;
	} CIPHER_ORDER;

static const SSL_CIPHER cipher_aliases[]={
	/* Don't include eNULL unless specifically enabled. */
	/* Don't include ECC in ALL because these ciphers are not yet official. */
	{0,SSL_TXT_ALL, 0,SSL_ALL & ~SSL_eNULL & ~SSL_kECDH & ~SSL_kECDHE, SSL_ALL ,0,0,0,SSL_ALL,SSL_ALL}, /* must be first */
	/* TODO: COMPLEMENT OF ALL and COMPLEMENT OF DEFAULT do not have ECC cipher suites handled properly. */
	{0,SSL_TXT_CMPALL,0,SSL_eNULL,0,0,0,0,SSL_ENC_MASK,0},  /* COMPLEMENT OF ALL */
	{0,SSL_TXT_CMPDEF,0,SSL_ADH, 0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_kKRB5,0,SSL_kKRB5,0,0,0,0,SSL_MKEY_MASK,0},  /* VRS Kerberos5 */
	{0,SSL_TXT_kRSA,0,SSL_kRSA,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kDHr,0,SSL_kDHr,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kDHd,0,SSL_kDHd,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kEDH,0,SSL_kEDH,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_kFZA,0,SSL_kFZA,  0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_DH,	0,SSL_DH,    0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_ECC,	0,(SSL_kECDH|SSL_kECDHE), 0,0,0,0,SSL_MKEY_MASK,0},
	{0,SSL_TXT_EDH,	0,SSL_EDH,   0,0,0,0,SSL_MKEY_MASK|SSL_AUTH_MASK,0},
	{0,SSL_TXT_aKRB5,0,SSL_aKRB5,0,0,0,0,SSL_AUTH_MASK,0},  /* VRS Kerberos5 */
	{0,SSL_TXT_aRSA,0,SSL_aRSA,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aDSS,0,SSL_aDSS,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aFZA,0,SSL_aFZA,  0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aNULL,0,SSL_aNULL,0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_aDH, 0,SSL_aDH,   0,0,0,0,SSL_AUTH_MASK,0},
	{0,SSL_TXT_DSS,	0,SSL_DSS,   0,0,0,0,SSL_AUTH_MASK,0},

	{0,SSL_TXT_DES,	0,SSL_DES,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_3DES,0,SSL_3DES,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_RC4,	0,SSL_RC4,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_RC2,	0,SSL_RC2,   0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_IDEA,0,SSL_IDEA,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_eNULL,0,SSL_eNULL,0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_eFZA,0,SSL_eFZA,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_AES,	0,SSL_AES,   0,0,0,0,SSL_ENC_MASK,0},

	{0,SSL_TXT_MD5,	0,SSL_MD5,   0,0,0,0,SSL_MAC_MASK,0},
	{0,SSL_TXT_SHA1,0,SSL_SHA1,  0,0,0,0,SSL_MAC_MASK,0},
	{0,SSL_TXT_SHA,	0,SSL_SHA,   0,0,0,0,SSL_MAC_MASK,0},

	{0,SSL_TXT_NULL,0,SSL_NULL,  0,0,0,0,SSL_ENC_MASK,0},
	{0,SSL_TXT_KRB5,0,SSL_KRB5,  0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_RSA,	0,SSL_RSA,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_ADH,	0,SSL_ADH,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK,0},
	{0,SSL_TXT_FZA,	0,SSL_FZA,   0,0,0,0,SSL_AUTH_MASK|SSL_MKEY_MASK|SSL_ENC_MASK,0},

	{0,SSL_TXT_SSLV2, 0,SSL_SSLV2, 0,0,0,0,SSL_SSL_MASK,0},
	{0,SSL_TXT_SSLV3, 0,SSL_SSLV3, 0,0,0,0,SSL_SSL_MASK,0},
	{0,SSL_TXT_TLSV1, 0,SSL_TLSV1, 0,0,0,0,SSL_SSL_MASK,0},

	{0,SSL_TXT_EXP   ,0, 0,SSL_EXPORT, 0,0,0,0,SSL_EXP_MASK},
	{0,SSL_TXT_EXPORT,0, 0,SSL_EXPORT, 0,0,0,0,SSL_EXP_MASK},
	{0,SSL_TXT_EXP40, 0, 0, SSL_EXP40, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_EXP56, 0, 0, SSL_EXP56, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_LOW,   0, 0,   SSL_LOW, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_MEDIUM,0, 0,SSL_MEDIUM, 0,0,0,0,SSL_STRONG_MASK},
	{0,SSL_TXT_HIGH,  0, 0,  SSL_HIGH, 0,0,0,0,SSL_STRONG_MASK},
	};

static int init_ciphers=1;

static void load_ciphers(void)
	{
	init_ciphers=0;
	ssl_cipher_methods[SSL_ENC_DES_IDX]= 
		EVP_get_cipherbyname(SN_des_cbc);
	ssl_cipher_methods[SSL_ENC_3DES_IDX]=
		EVP_get_cipherbyname(SN_des_ede3_cbc);
	ssl_cipher_methods[SSL_ENC_RC4_IDX]=
		EVP_get_cipherbyname(SN_rc4);
	ssl_cipher_methods[SSL_ENC_RC2_IDX]= 
		EVP_get_cipherbyname(SN_rc2_cbc);
	ssl_cipher_methods[SSL_ENC_IDEA_IDX]= 
		EVP_get_cipherbyname(SN_idea_cbc);
	ssl_cipher_methods[SSL_ENC_AES128_IDX]=
	  EVP_get_cipherbyname(SN_aes_128_cbc);
	ssl_cipher_methods[SSL_ENC_AES256_IDX]=
	  EVP_get_cipherbyname(SN_aes_256_cbc);

	ssl_digest_methods[SSL_MD_MD5_IDX]=
		EVP_get_digestbyname(SN_md5);
	ssl_digest_methods[SSL_MD_SHA1_IDX]=
		EVP_get_digestbyname(SN_sha1);
	}

static int sk_comp_cmp(const SSL_COMP * const *a,
			const SSL_COMP * const *b)
	{
	return((*a)->id-(*b)->id);
	}

static void load_builtin_compressions(void)
	{
	if (ssl_comp_methods != NULL)
		return;

	CRYPTO_w_lock(CRYPTO_LOCK_SSL);
	if (ssl_comp_methods == NULL)
		{
		SSL_COMP *comp = NULL;

		MemCheck_off();
		ssl_comp_methods=sk_SSL_COMP_new(sk_comp_cmp);
		if (ssl_comp_methods != NULL)
			{
			comp=(SSL_COMP *)OPENSSL_malloc(sizeof(SSL_COMP));
			if (comp != NULL)
				{
				comp->method=COMP_zlib();
				if (comp->method
					&& comp->method->type == NID_undef)
					OPENSSL_free(comp);
				else
					{
					comp->id=SSL_COMP_ZLIB_IDX;
					comp->name=comp->method->name;
					sk_SSL_COMP_push(ssl_comp_methods,comp);
					}
				}
			}
		MemCheck_on();
		}
	CRYPTO_w_unlock(CRYPTO_LOCK_SSL);
	}

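/*
 * Usage sketch (illustrative): the record layer resolves a session's
 * negotiated cipher into EVP handles roughly like this, assuming "s"
 * is an SSL with an established session:
 *
 *	const EVP_CIPHER *c;
 *	const EVP_MD *hash;
 *	SSL_COMP *comp;
 *
 *	if (!ssl_cipher_get_evp(s->session, &c, &hash, &comp))
 *		goto err;	(cipher or digest not compiled in)
 *
 * A zero return means the cipher or digest could not be resolved; the
 * compression method, if requested, may still have been filled in.
 */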
int ssl_cipher_get_evp(SSL_SESSION *s, const EVP_CIPHER **enc,
	     const EVP_MD **md, SSL_COMP **comp)
	{
	int i;
	SSL_CIPHER *c;

	c=s->cipher;
	if (c == NULL) return(0);
	if (comp != NULL)
		{
		SSL_COMP ctmp;

		load_builtin_compressions();

		*comp=NULL;
		ctmp.id=s->compress_meth;
		if (ssl_comp_methods != NULL)
			{
			i=sk_SSL_COMP_find(ssl_comp_methods,&ctmp);
			if (i >= 0)
				*comp=sk_SSL_COMP_value(ssl_comp_methods,i);
			else
				*comp=NULL;
			}
		}

	if ((enc == NULL) || (md == NULL)) return(0);

	switch (c->algorithms & SSL_ENC_MASK)
		{
	case SSL_DES:
		i=SSL_ENC_DES_IDX;
		break;
	case SSL_3DES:
		i=SSL_ENC_3DES_IDX;
		break;
	case SSL_RC4:
		i=SSL_ENC_RC4_IDX;
		break;
	case SSL_RC2:
		i=SSL_ENC_RC2_IDX;
		break;
	case SSL_IDEA:
		i=SSL_ENC_IDEA_IDX;
		break;
	case SSL_eNULL:
		i=SSL_ENC_NULL_IDX;
		break;
	case SSL_AES:
		switch(c->alg_bits)
			{
		case 128: i=SSL_ENC_AES128_IDX; break;
		case 256: i=SSL_ENC_AES256_IDX; break;
		default: i=-1; break;
			}
		break;
	default:
		i= -1;
		break;
		}

	if ((i < 0) || (i > SSL_ENC_NUM_IDX))
		*enc=NULL;
	else
		{
		if (i == SSL_ENC_NULL_IDX)
			*enc=EVP_enc_null();
		else
			*enc=ssl_cipher_methods[i];
		}

	switch (c->algorithms & SSL_MAC_MASK)
		{
	case SSL_MD5:
		i=SSL_MD_MD5_IDX;
		break;
	case SSL_SHA1:
		i=SSL_MD_SHA1_IDX;
		break;
	default:
		i= -1;
		break;
		}
	if ((i < 0) || (i > SSL_MD_NUM_IDX))
		*md=NULL;
	else
		*md=ssl_digest_methods[i];

	if ((*enc != NULL) && (*md != NULL))
		return(1);
	else
		return(0);
	}

#define ITEM_SEP(a) \
	(((a) == ':') || ((a) == ' ') || ((a) == ';') || ((a) == ','))

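/*
 * Detach curr from its current position in the list (unless it already is
 * the tail) and re-append it at *tail, updating *head and *tail as needed.
 */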
static void ll_append_tail(CIPHER_ORDER **head, CIPHER_ORDER *curr,
	     CIPHER_ORDER **tail)
	{
	if (curr == *tail) return;
	if (curr == *head)
		*head=curr->next;
	if (curr->prev != NULL)
		curr->prev->next=curr->next;
	if (curr->next != NULL) /* should always be true */
		curr->next->prev=curr->prev;
	(*tail)->next=curr;
	curr->prev= *tail;
	curr->next=NULL;
	*tail=curr;
	}

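/*
 * Build a mask of all algorithm bits that cannot be used, either because
 * support was not compiled in (OPENSSL_NO_*) or because the corresponding
 * EVP cipher/digest could not be looked up by load_ciphers().
 */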
static unsigned long ssl_cipher_get_disabled(void)
	{
	unsigned long mask;

	mask = SSL_kFZA;
#ifdef OPENSSL_NO_RSA
	mask |= SSL_aRSA|SSL_kRSA;
#endif
#ifdef OPENSSL_NO_DSA
	mask |= SSL_aDSS;
#endif
#ifdef OPENSSL_NO_DH
	mask |= SSL_kDHr|SSL_kDHd|SSL_kEDH|SSL_aDH;
#endif
#ifdef OPENSSL_NO_KRB5
	mask |= SSL_kKRB5|SSL_aKRB5;
#endif
#ifdef OPENSSL_NO_ECDH
	mask |= SSL_kECDH|SSL_kECDHE;
#endif
#ifdef SSL_FORBID_ENULL
	mask |= SSL_eNULL;
#endif

	mask |= (ssl_cipher_methods[SSL_ENC_DES_IDX ] == NULL) ? SSL_DES :0;
	mask |= (ssl_cipher_methods[SSL_ENC_3DES_IDX] == NULL) ? SSL_3DES:0;
	mask |= (ssl_cipher_methods[SSL_ENC_RC4_IDX ] == NULL) ? SSL_RC4 :0;
	mask |= (ssl_cipher_methods[SSL_ENC_RC2_IDX ] == NULL) ? SSL_RC2 :0;
	mask |= (ssl_cipher_methods[SSL_ENC_IDEA_IDX] == NULL) ? SSL_IDEA:0;
	mask |= (ssl_cipher_methods[SSL_ENC_eFZA_IDX] == NULL) ? SSL_eFZA:0;
	mask |= (ssl_cipher_methods[SSL_ENC_AES128_IDX] == NULL) ? SSL_AES:0;

	mask |= (ssl_digest_methods[SSL_MD_MD5_IDX ] == NULL) ? SSL_MD5 :0;
	mask |= (ssl_digest_methods[SSL_MD_SHA1_IDX] == NULL) ? SSL_SHA1:0;

	return(mask);
	}

static void ssl_cipher_collect_ciphers(const SSL_METHOD *ssl_method,
		int num_of_ciphers, unsigned long mask, CIPHER_ORDER *co_list,
		CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p)
	{
	int i, co_list_num;
	SSL_CIPHER *c;

	/*
	 * We have num_of_ciphers descriptions compiled in, depending on the
	 * method selected (SSLv2 and/or SSLv3, TLSv1 etc).
	 * These will later be sorted into a linked list with at most
	 * num_of_ciphers entries.
	 */

	/* Get the initial list of ciphers */
	co_list_num = 0;	/* actual count of ciphers */
	for (i = 0; i < num_of_ciphers; i++)
		{
		c = ssl_method->get_cipher(i);
		/* drop those that use any algorithm that is not available */
		if ((c != NULL) && c->valid && !(c->algorithms & mask))
			{
			co_list[co_list_num].cipher = c;
			co_list[co_list_num].next = NULL;
			co_list[co_list_num].prev = NULL;
			co_list[co_list_num].active = 0;
			co_list_num++;
#ifdef KSSL_DEBUG
			printf("\t%d: %s %lx %lx\n",i,c->name,c->id,c->algorithms);
#endif	/* KSSL_DEBUG */
			/*
			if (!sk_push(ca_list,(char *)c)) goto err;
			*/
			}
		}

	/*
	 * Prepare linked list from list entries
	 */	
	for (i = 1; i < co_list_num - 1; i++)
		{
		co_list[i].prev = &(co_list[i-1]);
		co_list[i].next = &(co_list[i+1]);
		}
	if (co_list_num > 0)
		{
		(*head_p) = &(co_list[0]);
		(*head_p)->prev = NULL;
		(*head_p)->next = &(co_list[1]);
		(*tail_p) = &(co_list[co_list_num - 1]);
		(*tail_p)->prev = &(co_list[co_list_num - 2]);
		(*tail_p)->next = NULL;
		}
	}

static void ssl_cipher_collect_aliases(SSL_CIPHER **ca_list,
			int num_of_group_aliases, unsigned long mask,
			CIPHER_ORDER *head)
	{
	CIPHER_ORDER *ciph_curr;
	SSL_CIPHER **ca_curr;
	int i;

	/*
	 * First, add the real ciphers as already collected
	 */
	ciph_curr = head;
	ca_curr = ca_list;
	while (ciph_curr != NULL)
		{
		*ca_curr = ciph_curr->cipher;
		ca_curr++;
		ciph_curr = ciph_curr->next;
		}

	/*
	 * Now we add the available ones from the cipher_aliases[] table.
	 * Each entry represents either an algorithm that must be fully
	 * supported (i.e. not match any bit in mask) or a cipher
	 * strength value (always added, because its algorithms field is 0).
	 */
	for (i = 0; i < num_of_group_aliases; i++)
		{
		if ((i == 0) ||		/* always fetch "ALL" */
		    !(cipher_aliases[i].algorithms & mask))
			{
			*ca_curr = (SSL_CIPHER *)(cipher_aliases + i);
			ca_curr++;
			}
		}

	*ca_curr = NULL;	/* end of list */
	}

static void ssl_cipher_apply_rule(unsigned long algorithms, unsigned long mask,
		unsigned long algo_strength, unsigned long mask_strength,
		int rule, int strength_bits, CIPHER_ORDER *co_list,
		CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p)
	{
	CIPHER_ORDER *head, *tail, *curr, *curr2, *tail2;
	SSL_CIPHER *cp;
	unsigned long ma, ma_s;

#ifdef CIPHER_DEBUG
	printf("Applying rule %d with %08lx %08lx %08lx %08lx (%d)\n",
		rule, algorithms, mask, algo_strength, mask_strength,
		strength_bits);
#endif

	curr = head = *head_p;
	curr2 = head;
	tail2 = tail = *tail_p;
	for (;;)
		{
		if ((curr == NULL) || (curr == tail2)) break;
		curr = curr2;
		curr2 = curr->next;

		cp = curr->cipher;

		/*
		 * The selection criterion is either the number of strength_bits
		 * or the algorithms used.
		 */
		if (strength_bits == -1)
			{
			ma = mask & cp->algorithms;
			ma_s = mask_strength & cp->algo_strength;

#ifdef CIPHER_DEBUG
			printf("\nName: %s:\nAlgo = %08lx Algo_strength = %08lx\nMask = %08lx Mask_strength %08lx\n", cp->name, cp->algorithms, cp->algo_strength, mask, mask_strength);
			printf("ma = %08lx ma_s %08lx, ma&algo=%08lx, ma_s&algos=%08lx\n", ma, ma_s, ma&algorithms, ma_s&algo_strength);
#endif
			/*
			 * Select: if none of the mask bits was matched by the
			 * cipher, or not all of the bits were matched, the
			 * selection does not apply.
			 */
			if (((ma == 0) && (ma_s == 0)) ||
			    ((ma & algorithms) != ma) ||
			    ((ma_s & algo_strength) != ma_s))
				continue; /* does not apply */
			}
		else if (strength_bits != cp->strength_bits)
			continue;	/* does not apply */

#ifdef CIPHER_DEBUG
		printf("Action = %d\n", rule);
#endif

		/* add the cipher if it has not been added yet. */
		if (rule == CIPHER_ADD)
			{
			if (!curr->active)
				{
				ll_append_tail(&head, curr, &tail);
				curr->active = 1;
				}
			}
		/* Move the added cipher to this location */
		else if (rule == CIPHER_ORD)
			{
			if (curr->active)
				{
				ll_append_tail(&head, curr, &tail);
				}
			}
		else if	(rule == CIPHER_DEL)
			curr->active = 0;
		else if (rule == CIPHER_KILL)
			{
			if (head == curr)
				head = curr->next;
			else
				curr->prev->next = curr->next;
			if (tail == curr)
				tail = curr->prev;
			curr->active = 0;
			if (curr->next != NULL)
				curr->next->prev = curr->prev;
			if (curr->prev != NULL)
				curr->prev->next = curr->next;
			curr->next = NULL;
			curr->prev = NULL;
			}
		}

	*head_p = head;
	*tail_p = tail;
	}

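/*
 * ssl_cipher_strength_sort() implements the "@STRENGTH" directive: the
 * currently active ciphers are re-ordered by descending strength_bits,
 * while the existing order is preserved within each strength class.
 */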
static int ssl_cipher_strength_sort(CIPHER_ORDER *co_list,
				    CIPHER_ORDER **head_p,
				    CIPHER_ORDER **tail_p)
	{
	int max_strength_bits, i, *number_uses;
	CIPHER_ORDER *curr;

	/*
	 * This routine sorts the ciphers in descending order of strength. The
	 * sort must preserve the pre-sorted sequence, so we apply the normal
	 * ordering rule ('+' movement to the end of the list) per strength class.
	 */
	max_strength_bits = 0;
	curr = *head_p;
	while (curr != NULL)
		{
		if (curr->active &&
		    (curr->cipher->strength_bits > max_strength_bits))
		    max_strength_bits = curr->cipher->strength_bits;
		curr = curr->next;
		}

	number_uses = OPENSSL_malloc((max_strength_bits + 1) * sizeof(int));
	if (!number_uses)
		{
		SSLerr(SSL_F_SSL_CIPHER_STRENGTH_SORT,ERR_R_MALLOC_FAILURE);
		return(0);
		}
	memset(number_uses, 0, (max_strength_bits + 1) * sizeof(int));

	/*
	 * Now find the strength_bits values actually used
	 */
	curr = *head_p;
	while (curr != NULL)
		{
		if (curr->active)
			number_uses[curr->cipher->strength_bits]++;
		curr = curr->next;
		}
	/*
	 * Go through the list of used strength_bits values in descending
	 * order.
	 */
	for (i = max_strength_bits; i >= 0; i--)
		if (number_uses[i] > 0)
			ssl_cipher_apply_rule(0, 0, 0, 0, CIPHER_ORD, i,
					co_list, head_p, tail_p);

	OPENSSL_free(number_uses);
	return(1);
	}

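/*
 * Illustrative note: a rule string is a list of words separated by ':',
 * ' ', ';' or ',' (see ITEM_SEP).  Each word may be prefixed with '-'
 * (CIPHER_DEL), '+' (CIPHER_ORD), '!' (CIPHER_KILL) or '@' (CIPHER_SPECIAL),
 * and words may be joined with '+' to require several algorithms at once,
 * e.g.
 *
 *	"ALL:!ADH:RC4+RSA:+SSLv2:@STRENGTH"
 */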
static int ssl_cipher_process_rulestr(const char *rule_str,
		CIPHER_ORDER *co_list, CIPHER_ORDER **head_p,
		CIPHER_ORDER **tail_p, SSL_CIPHER **ca_list)
	{
	unsigned long algorithms, mask, algo_strength, mask_strength;
	const char *l, *start, *buf;
	int j, multi, found, rule, retval, ok, buflen;
	char ch;

	retval = 1;
	l = rule_str;
	for (;;)
		{
		ch = *l;

		if (ch == '\0')
			break;		/* done */
		if (ch == '-')
			{ rule = CIPHER_DEL; l++; }
		else if (ch == '+')
			{ rule = CIPHER_ORD; l++; }
		else if (ch == '!')
			{ rule = CIPHER_KILL; l++; }
		else if (ch == '@')
			{ rule = CIPHER_SPECIAL; l++; }
		else
			{ rule = CIPHER_ADD; }

		if (ITEM_SEP(ch))
			{
			l++;
			continue;
			}

		algorithms = mask = algo_strength = mask_strength = 0;

		start=l;
		for (;;)
			{
			ch = *l;
			buf = l;
			buflen = 0;
#ifndef CHARSET_EBCDIC
			while (	((ch >= 'A') && (ch <= 'Z')) ||
				((ch >= '0') && (ch <= '9')) ||
				((ch >= 'a') && (ch <= 'z')) ||
				 (ch == '-'))
#else
			while (	isalnum(ch) || (ch == '-'))
#endif
				 {
				 ch = *(++l);
				 buflen++;
				 }

			if (buflen == 0)
				{
				/*
				 * We hit something we cannot deal with:
				 * it is neither a command nor a separator nor
				 * alphanumeric, so we call this an error.
				 */
				SSLerr(SSL_F_SSL_CIPHER_PROCESS_RULESTR,
				       SSL_R_INVALID_COMMAND);
				retval = found = 0;
				l++;
				break;
				}

			if (rule == CIPHER_SPECIAL)
				{
				found = 0; /* unused -- avoid compiler warning */
				break;	/* special treatment */
				}

			/* check for multi-part specification */
			if (ch == '+')
				{
				multi=1;
				l++;
				}
			else
				multi=0;

			/*
			 * Now search for the cipher alias in the ca_list. Be careful
			 * with the strncmp, because the "buflen" limitation
			 * will make the rule "ADH:SOME" and the cipher
			 * "ADH-MY-CIPHER" look like a match for buflen=3.
			 * So additionally check whether the cipher name found
			 * has the correct length. We can save a strlen() call:
			 * just checking for the '\0' at the right place is
			 * sufficient, we have to strncmp() anyway. (We cannot
			 * use strcmp(), because buf is not '\0' terminated.)
			 */
			 j = found = 0;
			 while (ca_list[j])
				{
				if (!strncmp(buf, ca_list[j]->name, buflen) &&
				    (ca_list[j]->name[buflen] == '\0'))
					{
					found = 1;
					break;
					}
				else
					j++;
				}
			if (!found)
				break;	/* ignore this entry */

			algorithms |= ca_list[j]->algorithms;
			mask |= ca_list[j]->mask;
			algo_strength |= ca_list[j]->algo_strength;
			mask_strength |= ca_list[j]->mask_strength;

			if (!multi) break;
			}

		/*
		 * Ok, we have the rule, now apply it
		 */
		if (rule == CIPHER_SPECIAL)
			{	/* special command */
			ok = 0;
			if ((buflen == 8) &&
				!strncmp(buf, "STRENGTH", 8))
				ok = ssl_cipher_strength_sort(co_list,
					head_p, tail_p);
			else
				SSLerr(SSL_F_SSL_CIPHER_PROCESS_RULESTR,
					SSL_R_INVALID_COMMAND);
			if (ok == 0)
				retval = 0;
			/*
			 * We do not support any "multi" options
			 * together with "@", so throw away the
			 * rest of the command, if any is left, until
			 * the end or a ':' is found.
			 */
			while ((*l != '\0') && ITEM_SEP(*l))
				l++;
			}
		else if (found)
			{
			ssl_cipher_apply_rule(algorithms, mask,
				algo_strength, mask_strength, rule, -1,
				co_list, head_p, tail_p);
			}
		else
			{
			while ((*l != '\0') && ITEM_SEP(*l))
				l++;
			}
		if (*l == '\0') break; /* done */
		}

	return(retval);
	}

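/*
 * Usage sketch (illustrative): applications normally reach this function
 * through the public API, e.g.
 *
 *	if (!SSL_CTX_set_cipher_list(ctx, "DEFAULT:!EXP:@STRENGTH"))
 *		goto err;
 *
 * which calls ssl_create_cipher_list() with the context's cipher_list and
 * cipher_list_by_id stacks and the given rule string.
 */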
STACK_OF(SSL_CIPHER) *ssl_create_cipher_list(const SSL_METHOD *ssl_method,
		STACK_OF(SSL_CIPHER) **cipher_list,
		STACK_OF(SSL_CIPHER) **cipher_list_by_id,
		const char *rule_str)
	{
	int ok, num_of_ciphers, num_of_alias_max, num_of_group_aliases;
	unsigned long disabled_mask;
	STACK_OF(SSL_CIPHER) *cipherstack;
	const char *rule_p;
	CIPHER_ORDER *co_list = NULL, *head = NULL, *tail = NULL, *curr;
	SSL_CIPHER **ca_list = NULL;

	/*
	 * Return with error if nothing to do.
	 */
	if (rule_str == NULL) return(NULL);

	if (init_ciphers)
		{
		CRYPTO_w_lock(CRYPTO_LOCK_SSL);
		if (init_ciphers) load_ciphers();
		CRYPTO_w_unlock(CRYPTO_LOCK_SSL);
		}

	/*
	 * To reduce the work to be done, we only want to process the
	 * compiled-in algorithms, so we first get the mask of disabled ciphers.
	 */
	disabled_mask = ssl_cipher_get_disabled();

	/*
	 * Now we have to collect the available ciphers from the compiled
	 * in ciphers. We cannot get more than the number compiled in, so
	 * it is used for allocation.
	 */
	num_of_ciphers = ssl_method->num_ciphers();
#ifdef KSSL_DEBUG
	printf("ssl_create_cipher_list() for %d ciphers\n", num_of_ciphers);
#endif    /* KSSL_DEBUG */
	co_list = (CIPHER_ORDER *)OPENSSL_malloc(sizeof(CIPHER_ORDER) * num_of_ciphers);
	if (co_list == NULL)
		{
		SSLerr(SSL_F_SSL_CREATE_CIPHER_LIST,ERR_R_MALLOC_FAILURE);
		return(NULL);	/* Failure */
		}

	ssl_cipher_collect_ciphers(ssl_method, num_of_ciphers, disabled_mask,
				   co_list, &head, &tail);

	/*
	 * We also need cipher aliases for selecting based on the rule_str.
	 * There might be two types of entries in the rule_str: 1) names
	 * of ciphers themselves 2) aliases for groups of ciphers.
	 * For 1) we need the available ciphers and for 2) the cipher
	 * groups of cipher_aliases added together in one list (otherwise
	 * we would be happy with just the cipher_aliases table).
	 */
	num_of_group_aliases = sizeof(cipher_aliases) / sizeof(SSL_CIPHER);
	num_of_alias_max = num_of_ciphers + num_of_group_aliases + 1;
	ca_list =
		(SSL_CIPHER **)OPENSSL_malloc(sizeof(SSL_CIPHER *) * num_of_alias_max);
	if (ca_list == NULL)
		{
		OPENSSL_free(co_list);
		SSLerr(SSL_F_SSL_CREATE_CIPHER_LIST,ERR_R_MALLOC_FAILURE);
		return(NULL);	/* Failure */
		}
	ssl_cipher_collect_aliases(ca_list, num_of_group_aliases, disabled_mask,
				   head);

	/*
	 * If the rule_string begins with DEFAULT, apply the default rule
	 * before using the (possibly available) additional rules.
	 */
	ok = 1;
	rule_p = rule_str;
	if (strncmp(rule_str,"DEFAULT",7) == 0)
		{
		ok = ssl_cipher_process_rulestr(SSL_DEFAULT_CIPHER_LIST,
			co_list, &head, &tail, ca_list);
		rule_p += 7;
		if (*rule_p == ':')
			rule_p++;
		}

	if (ok && (strlen(rule_p) > 0))
		ok = ssl_cipher_process_rulestr(rule_p, co_list, &head, &tail,
						ca_list);

	OPENSSL_free(ca_list);	/* Not needed anymore */

	if (!ok)
		{	/* Rule processing failure */
		OPENSSL_free(co_list);
		return(NULL);
		}
	/*
	 * Allocate new "cipherstack" for the result, return with error
	 * if we cannot get one.
	 */
	if ((cipherstack = sk_SSL_CIPHER_new_null()) == NULL)
		{
		OPENSSL_free(co_list);
		return(NULL);
		}

	/*
	 * The cipher selection for the list is done. The ciphers are added
	 * in the resulting precedence order to the STACK_OF(SSL_CIPHER).
	 */
	for (curr = head; curr != NULL; curr = curr->next)
		{
		if (curr->active)
			{
			sk_SSL_CIPHER_push(cipherstack, curr->cipher);
#ifdef CIPHER_DEBUG
			printf("<%s>\n",curr->cipher->name);
#endif
			}
		}
	OPENSSL_free(co_list);	/* Not needed any longer */

	/*
	 * The following passage is a little bit odd. If pointer variables
	 * were supplied to hold STACK_OF(SSL_CIPHER) return information,
	 * the old memory pointed to is free()ed. Then, however, the
	 * cipher_list entry is assigned the returned cipher stack itself,
	 * while for cipher_list_by_id a copy of the cipher stack
	 * is created. See the next comment...
	 */
	if (cipher_list != NULL)
		{
		if (*cipher_list != NULL)
			sk_SSL_CIPHER_free(*cipher_list);
		*cipher_list = cipherstack;
		}

	if (cipher_list_by_id != NULL)
		{
		if (*cipher_list_by_id != NULL)
			sk_SSL_CIPHER_free(*cipher_list_by_id);
		*cipher_list_by_id = sk_SSL_CIPHER_dup(cipherstack);
		}

	/*
	 * Now it is getting really strange. If something failed during
	 * the previous pointer assignment or if one of the pointers was
	 * not requested, the error condition is met. That might be
	 * discussable. The strange thing is however that in this case
	 * the memory "ret" pointed to is "free()ed" and hence the pointer
	 * cipher_list becomes wild. The memory reserved for
	 * cipher_list_by_id however is not "free()ed" and stays intact.
	 */
	if (	(cipher_list_by_id == NULL) ||
		(*cipher_list_by_id == NULL) ||
		(cipher_list == NULL) ||
		(*cipher_list == NULL))
		{
		sk_SSL_CIPHER_free(cipherstack);
		return(NULL);
		}

	sk_SSL_CIPHER_set_cmp_func(*cipher_list_by_id,ssl_cipher_ptr_id_cmp);

	return(cipherstack);
	}

char *SSL_CIPHER_description(SSL_CIPHER *cipher, char *buf, int len)
	{
	int is_export,pkl,kl;
	char *ver,*exp_str;
	char *kx,*au,*enc,*mac;
	unsigned long alg,alg2,alg_s;
#ifdef KSSL_DEBUG
	static char *format="%-23s %s Kx=%-8s Au=%-4s Enc=%-9s Mac=%-4s%s AL=%lx\n";
#else
	static char *format="%-23s %s Kx=%-8s Au=%-4s Enc=%-9s Mac=%-4s%s\n";
#endif /* KSSL_DEBUG */

	alg=cipher->algorithms;
	alg_s=cipher->algo_strength;
	alg2=cipher->algorithm2;

	is_export=SSL_C_IS_EXPORT(cipher);
	pkl=SSL_C_EXPORT_PKEYLENGTH(cipher);
	kl=SSL_C_EXPORT_KEYLENGTH(cipher);
	exp_str=is_export?" export":"";

	if (alg & SSL_SSLV2)
		ver="SSLv2";
	else if (alg & SSL_SSLV3)
		ver="SSLv3";
	else
		ver="unknown";

	switch (alg&SSL_MKEY_MASK)
		{
	case SSL_kRSA:
		kx=is_export?(pkl == 512 ? "RSA(512)" : "RSA(1024)"):"RSA";
		break;
	case SSL_kDHr:
		kx="DH/RSA";
		break;
	case SSL_kDHd:
		kx="DH/DSS";
		break;
	case SSL_kKRB5:		/* VRS */
	case SSL_KRB5:		/* VRS */
		kx="KRB5";
		break;
	case SSL_kFZA:
		kx="Fortezza";
		break;
	case SSL_kEDH:
		kx=is_export?(pkl == 512 ? "DH(512)" : "DH(1024)"):"DH";
		break;
	case SSL_kECDH:
	case SSL_kECDHE:
		kx=is_export?"ECDH(<=163)":"ECDH";
		break;
	default:
		kx="unknown";
		}

	switch (alg&SSL_AUTH_MASK)
		{
	case SSL_aRSA:
		au="RSA";
		break;
	case SSL_aDSS:
		au="DSS";
		break;
	case SSL_aDH:
		au="DH";
		break;
	case SSL_aKRB5:		/* VRS */
	case SSL_KRB5:		/* VRS */
		au="KRB5";
		break;
	case SSL_aFZA:
	case SSL_aNULL:
		au="None";
		break;
	case SSL_aECDSA:
		au="ECDSA";
		break;
	default:
		au="unknown";
		break;
		}

	switch (alg&SSL_ENC_MASK)
		{
	case SSL_DES:
		enc=(is_export && kl == 5)?"DES(40)":"DES(56)";
		break;
	case SSL_3DES:
		enc="3DES(168)";
		break;
	case SSL_RC4:
		enc=is_export?(kl == 5 ? "RC4(40)" : "RC4(56)")
		  :((alg2&SSL2_CF_8_BYTE_ENC)?"RC4(64)":"RC4(128)");
		break;
	case SSL_RC2:
		enc=is_export?(kl == 5 ? "RC2(40)" : "RC2(56)"):"RC2(128)";
		break;
	case SSL_IDEA:
		enc="IDEA(128)";
		break;
	case SSL_eFZA:
		enc="Fortezza";
		break;
	case SSL_eNULL:
		enc="None";
		break;
	case SSL_AES:
		switch(cipher->strength_bits)
			{
		case 128: enc="AES(128)"; break;
		case 192: enc="AES(192)"; break;
		case 256: enc="AES(256)"; break;
		default: enc="AES(?""?""?)"; break;
			}
		break;
	default:
		enc="unknown";
		break;
		}

	switch (alg&SSL_MAC_MASK)
		{
	case SSL_MD5:
		mac="MD5";
		break;
	case SSL_SHA1:
		mac="SHA1";
		break;
	default:
		mac="unknown";
		break;
		}

	if (buf == NULL)
		{
		len=128;
		buf=OPENSSL_malloc(len);
		if (buf == NULL) return("OPENSSL_malloc Error");
		}
	else if (len < 128)
		return("Buffer too small");

#ifdef KSSL_DEBUG
	BIO_snprintf(buf,len,format,cipher->name,ver,kx,au,enc,mac,exp_str,alg);
#else
	BIO_snprintf(buf,len,format,cipher->name,ver,kx,au,enc,mac,exp_str);
#endif /* KSSL_DEBUG */
	return(buf);
	}

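/*
 * For reference (illustrative): the line produced by SSL_CIPHER_description()
 * above for a typical RSA/RC4/MD5 suite looks roughly like
 *
 *	RC4-MD5                 SSLv3 Kx=RSA      Au=RSA  Enc=RC4(128)  Mac=MD5
 *
 * with the column widths taken from the format string.  The buffer passed
 * in must hold at least 128 bytes, or NULL may be passed to have one
 * allocated with OPENSSL_malloc().
 */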
char *SSL_CIPHER_get_version(SSL_CIPHER *c)
	{
	int i;

	if (c == NULL) return("(NONE)");
	i=(int)(c->id>>24L);
	if (i == 3)
		return("TLSv1/SSLv3");
	else if (i == 2)
		return("SSLv2");
	else
		return("unknown");
	}

/* return the actual cipher being used */
const char *SSL_CIPHER_get_name(SSL_CIPHER *c)
	{
	if (c != NULL)
		return(c->name);
	return("(NONE)");
	}

/* number of bits for symmetric cipher */
int SSL_CIPHER_get_bits(SSL_CIPHER *c, int *alg_bits)
	{
	int ret=0;

	if (c != NULL)
		{
		if (alg_bits != NULL) *alg_bits = c->alg_bits;
		ret = c->strength_bits;
		}
	return(ret);
	}

SSL_COMP *ssl3_comp_find(STACK_OF(SSL_COMP) *sk, int n)
	{
	SSL_COMP *ctmp;
	int i,nn;

	if ((n == 0) || (sk == NULL)) return(NULL);
	nn=sk_SSL_COMP_num(sk);
	for (i=0; i<nn; i++)
		{
		ctmp=sk_SSL_COMP_value(sk,i);
		if (ctmp->id == n)
			return(ctmp);
		}
	return(NULL);
	}

STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void)
	{
	load_builtin_compressions();
	return(ssl_comp_methods);
	}

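/*
 * Usage sketch (illustrative): an application wanting to register zlib
 * compression under a private-range identifier could call
 *
 *	if (SSL_COMP_add_compression_method(245, COMP_zlib()) != 0)
 *		goto err;
 *
 * Note the return convention of this version: 0 means success, while a
 * duplicate id or an allocation failure returns 1 (and an out-of-range id
 * returns 0 after raising an error).
 */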
int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm)
	{
	SSL_COMP *comp;

	if (cm == NULL || cm->type == NID_undef)
		return 1;

	/* According to draft-ietf-tls-compression-04.txt, the
	   compression number ranges should be the following:

	   0 to 63:    methods defined by the IETF
	   64 to 192:  external party methods assigned by IANA
	   193 to 255: reserved for private use */
	if (id < 193 || id > 255)
		{
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE);
		return 0;
		}

	MemCheck_off();
	comp=(SSL_COMP *)OPENSSL_malloc(sizeof(SSL_COMP));
	if (comp == NULL)
		{
		MemCheck_on();
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,ERR_R_MALLOC_FAILURE);
		return(1);
		}
	comp->id=id;
	comp->method=cm;
	load_builtin_compressions();
	if (ssl_comp_methods
		&& !sk_SSL_COMP_find(ssl_comp_methods,comp))
		{
		OPENSSL_free(comp);
		MemCheck_on();
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,SSL_R_DUPLICATE_COMPRESSION_ID);
		return(1);
		}
	else if ((ssl_comp_methods == NULL)
		|| !sk_SSL_COMP_push(ssl_comp_methods,comp))
		{
		OPENSSL_free(comp);
		MemCheck_on();
		SSLerr(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD,ERR_R_MALLOC_FAILURE);
		return(1);
		}
	else
		{
		MemCheck_on();
		return(0);
		}
	}

const char *SSL_COMP_get_name(const COMP_METHOD *comp)
	{
	if (comp)
		return comp->name;
	return NULL;
	}