Commit f443b21a authored by Xin Long, committed by Yang Yingliang

sctp: delay auto_asconf init until binding the first addr

stable inclusion
from linux-4.19.191
commit 59339c866e0428fb92bfb3f5290c49a5325d2494

--------------------------------

commit 34e5b011 upstream.

As Or Cohen described:

  If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
  held and sp->do_auto_asconf is true, then an element is removed
  from the auto_asconf_splist without any proper locking.

  This can happen in the following functions:
  1. In sctp_accept, if sctp_sock_migrate fails.
  2. In inet_create or inet6_create, if there is a bpf program
     attached to BPF_CGROUP_INET_SOCK_CREATE which denies
     creation of the sctp socket.
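In both cases the failing path ends in sk_common_release() ->
sctp_destroy_sock(), which drops the socket from the list with no lock
held. Abridged from the 4.19-era code (the locking comment is added here
for illustration):

  static void sctp_destroy_sock(struct sock *sk)
  {
          struct sctp_sock *sp = sctp_sk(sk);
          ...
          if (sp->do_auto_asconf) {
                  sp->do_auto_asconf = 0;
                  /* sock_net(sk)->sctp.addr_wq_lock is NOT held here */
                  list_del(&sp->auto_asconf_list);
          }
          ...
  }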

This patch fixes it by moving the auto_asconf init out of
sctp_init_sock(), so that when inet_create()/inet6_create() call
sk_common_release(), sctp_destroy_sock() no longer has to touch the
list.
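With the init moved out of sctp_init_sock(), a failure right after socket
init no longer leaves an entry to tear down. Abridged from inet_create()
after commit 61023658 (inet6_create() is analogous; comments added here):

  if (sk->sk_prot->init) {
          err = sk->sk_prot->init(sk);    /* sctp_init_sock() */
          if (err) {
                  sk_common_release(sk);  /* -> sctp_destroy_sock() */
                  goto out;
          }
  }

  if (!kern) {
          err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
          if (err) {
                  /* before this patch, this destroyed a socket already
                   * linked on auto_asconf_splist, without the lock
                   */
                  sk_common_release(sk);
                  goto out;
          }
  }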

It also makes more sense to do the auto_asconf init while binding the
first addr, as auto_asconf actually requires an ANY addr bind; see
sctp_addr_wq_timeout_handler().
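For reference, the handler only acts on sockets bound to an ANY address:
it walks auto_asconf_splist and skips bound-specific endpoints. Abridged
from sctp_addr_wq_timeout_handler():

  list_for_each_entry(sp, &net->sctp.auto_asconf_splist,
                      auto_asconf_list) {
          struct sock *sk = sctp_opt2sk(sp);

          /* ignore bound-specific endpoints */
          if (!sctp_is_ep_boundall(sk))
                  continue;
          ...
  }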

This addresses CVE-2021-23133.

Fixes: 61023658 ("bpf: Add new cgroup attach type to enable sock modifications")
Reported-by: Or Cohen <orcohen@paloaltonetworks.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 9523beb6
net/sctp/socket.c
@@ -375,6 +375,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 	return af;
 }
 
+static void sctp_auto_asconf_init(struct sctp_sock *sp)
+{
+	struct net *net = sock_net(&sp->inet.sk);
+
+	if (net->sctp.default_auto_asconf) {
+		spin_lock(&net->sctp.addr_wq_lock);
+		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+		spin_unlock(&net->sctp.addr_wq_lock);
+		sp->do_auto_asconf = 1;
+	}
+}
+
 /* Bind a local address either to an endpoint or to an association. */
 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 {
@@ -437,8 +449,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	}
 
 	/* Refresh ephemeral port. */
-	if (!bp->port)
+	if (!bp->port) {
 		bp->port = inet_sk(sk)->inet_num;
+		sctp_auto_asconf_init(sp);
+	}
 
 	/* Add the address to the bind address list.
 	 * Use GFP_ATOMIC since BHs will be disabled.
@@ -4779,19 +4793,6 @@ static int sctp_init_sock(struct sock *sk)
 	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
-	/* Nothing can fail after this block, otherwise
-	 * sctp_destroy_sock() will be called without addr_wq_lock held
-	 */
-	if (net->sctp.default_auto_asconf) {
-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
-		list_add_tail(&sp->auto_asconf_list,
-			      &net->sctp.auto_asconf_splist);
-		sp->do_auto_asconf = 1;
-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
-	} else {
-		sp->do_auto_asconf = 0;
-	}
-
 	local_bh_enable();
 
 	return 0;
@@ -8851,6 +8852,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
 			   &oldsp->ep->base.bind_addr, GFP_KERNEL);
 
+	sctp_auto_asconf_init(newsp);
+
 	/* Move any messages in the old socket's receive queue that are for the
 	 * peeled off association to the new socket's receive queue.
 	 */