diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index e28f5a0182e85fcba41a420c305e0f78bfaa318e..4435d1084755f07a3cbf7439a55d98aa5c0f7b4c 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -225,6 +225,8 @@ enum
 	LINUX_MIB_SACKSHIFTED,
 	LINUX_MIB_SACKMERGED,
 	LINUX_MIB_SACKSHIFTFALLBACK,
+	LINUX_MIB_TCPBACKLOGDROP,
+	LINUX_MIB_TCPMINTTLDROP,	/* RFC 5082 */
 	__LINUX_MIB_MAX
 };
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 242ed23073702bfa6365e4f7c49c888c55615774..4f1f337f4337836169f19536de6adf4ba7c23064 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
 	SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
 	SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
+	SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+	SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1915f7dc30e6abba10d1273403660a18726abc55..8d51d39ad1bb80b25834a18bb2aacddadc692a6d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1651,8 +1651,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!sk)
 		goto no_tcp_socket;
 
-	if (iph->ttl < inet_sk(sk)->min_ttl)
+	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
+	}
 
 process:
 	if (sk->sk_state == TCP_TIME_WAIT)
@@ -1682,8 +1684,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (unlikely(sk_add_backlog(sk, skb))) {
 		bh_unlock_sock(sk);
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2c378b1bd5cfedfbe3773e2ebc34e2665eb97994..9b6dbba80d312913278f50e059b1d19b4a554cfe 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,8 +1740,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (unlikely(sk_add_backlog(sk, skb))) {
 		bh_unlock_sock(sk);
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
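
Notes:

With this patch applied, both counters appear on the TcpExt: line of
/proc/net/netstat (the names come from the SNMP_MIB_ITEM entries above)
and can be watched with, e.g.:

	nstat -az | grep -E 'TcpExtTCPBacklogDrop|TcpExtTCPMinTTLDrop'

TCPBacklogDrop counts packets dropped because sk_add_backlog() found the
socket backlog full while the socket was owned by user context;
TCPMinTTLDrop counts segments discarded because their TTL was below the
socket's configured minimum (RFC 5082 GTSM). As a rough userspace
illustration, not part of this patch, a receiver arms the min-TTL check
via the IP_MINTTL socket option (fd here is an assumed, already-created
TCP socket):

	int minttl = 255;	/* accept only directly connected peers */
	setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl));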