diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 850be3a00e62d2ee5f51f3ea3b4ecd751e00af3b..1f1cc33895fd502b6c502ad7bd9d7bc7f85c6c8b 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -365,22 +365,13 @@ static void nft_hash_destroy(const struct nft_set *set)
 static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
			      struct nft_set_estimate *est)
 {
-	unsigned int esize;
-
-	esize = sizeof(struct nft_hash_elem);
-	if (desc->size) {
+	if (desc->size)
 		est->size = sizeof(struct nft_hash) +
			    roundup_pow_of_two(desc->size * 4 / 3) *
			    sizeof(struct nft_hash_elem *) +
-			    desc->size * esize;
-	} else {
-		/* Resizing happens when the load drops below 30% or goes
-		 * above 75%. The average of 52.5% load (approximated by 50%)
-		 * is used for the size estimation of the hash buckets,
-		 * meaning we calculate two buckets per element.
-		 */
-		est->size = esize + 2 * sizeof(struct nft_hash_elem *);
-	}
+			    desc->size * sizeof(struct nft_hash_elem);
+	else
+		est->size = ~0;
 
 	est->lookup = NFT_SET_CLASS_O_1;
 	est->space  = NFT_SET_CLASS_O_N;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a107b0361322be10f16b4ab4b5d32..fbfb3cbb3916f6430e2a84d5ddbd7c2f8db2d4ab 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -283,13 +283,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
 {
-	unsigned int nsize;
-
-	nsize = sizeof(struct nft_rbtree_elem);
 	if (desc->size)
-		est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
+		est->size = sizeof(struct nft_rbtree) +
+			    desc->size * sizeof(struct nft_rbtree_elem);
 	else
-		est->size = nsize;
+		est->size = ~0;
 
 	est->lookup = NFT_SET_CLASS_O_LOG_N;
 	est->space  = NFT_SET_CLASS_O_N;