Commit 85bd6055 authored by Jakub Kicinski

Merge branch 'bug-fixes-for-chtls-driver'

Ayush Sawal says:

====================
Bug fixes for chtls driver

patch 1: Fix hardware tid leak.
patch 2: Remove invalid set_tcb call.
patch 3: Fix panic when route to peer not configured.
patch 4: Avoid unnecessary freeing of oreq pointer.
patch 5: Replace skb_dequeue with skb_peek.
patch 6: Added a check to avoid NULL pointer dereference.
patch 7: Fix chtls resources release sequence.
====================

Link: https://lore.kernel.org/r/20210106042912.23512-1-ayush.sawal@chelsio.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
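
For context on patch 5: skb_peek() returns the first entry of an sk_buff queue without unlinking it, whereas __skb_dequeue() also removes the head from the list. Below is a minimal sketch of that distinction; drain_pending() and release_entry() are hypothetical names, not chtls symbols, and the snippet only illustrates the general pattern of a drain loop whose per-entry release path is itself responsible for unlinking the entry, which is the situation the chtls_reset_synq() hunk further down appears to address.

#include <linux/skbuff.h>

/* Minimal sketch, not chtls code: drain a queue whose release
 * callback unlinks and frees each entry itself.
 */
static void drain_pending(struct sk_buff_head *pending_q,
			  void (*release_entry)(struct sk_buff *skb))
{
	while (!skb_queue_empty(pending_q)) {
		/* Look at the head without unlinking it ... */
		struct sk_buff *skb = skb_peek(pending_q);

		/* ... because release_entry() is expected to unlink (and
		 * free) the entry; pairing it with __skb_dequeue() here
		 * would unlink the same entry twice.
		 */
		release_entry(skb);
	}
}
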
@@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 	while (!skb_queue_empty(&listen_ctx->synq)) {
 		struct chtls_sock *csk =
-			container_of((struct synq *)__skb_dequeue
+			container_of((struct synq *)skb_peek
 				(&listen_ctx->synq), struct chtls_sock, synq);
 		struct sock *child = csk->sk;
@@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 				    const struct cpl_pass_accept_req *req,
 				    struct chtls_dev *cdev)
 {
+	struct adapter *adap = pci_get_drvdata(cdev->pdev);
 	struct neighbour *n = NULL;
 	struct inet_sock *newinet;
 	const struct iphdr *iph;
@@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	struct dst_entry *dst;
 	struct tcp_sock *tp;
 	struct sock *newsk;
+	bool found = false;
 	u16 port_id;
 	int rxq_idx;
-	int step;
+	int step, i;
 
 	iph = (const struct iphdr *)network_hdr;
 	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 		n = dst_neigh_lookup(dst, &ip6h->saddr);
 #endif
 	}
-	if (!n)
+	if (!n || !n->dev)
 		goto free_sk;
 
 	ndev = n->dev;
@@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	if (is_vlan_dev(ndev))
 		ndev = vlan_dev_real_dev(ndev);
 
+	for_each_port(adap, i)
+		if (cdev->ports[i] == ndev)
+			found = true;
+
+	if (!found)
+		goto free_dst;
+
 	port_id = cxgb4_port_idx(ndev);
 	csk = chtls_sock_create(cdev);
@@ -1238,6 +1247,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
 	chtls_sock_release(&csk->kref);
 free_dst:
+	neigh_release(n);
 	dst_release(dst);
 free_sk:
 	inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1397,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
 	if (!newsk)
-		goto free_oreq;
+		goto reject;
 
 	if (chtls_get_module(newsk))
 		goto reject;
@@ -1403,8 +1413,6 @@ static void chtls_pass_accept_request(struct sock *sk,
 	kfree_skb(skb);
 	return;
 
-free_oreq:
-	chtls_reqsk_free(oreq);
 reject:
 	mk_tid_release(reply_skb, 0, tid);
 	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1597,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
 	sk_wake_async(sk, 0, POLL_OUT);
 
 	data = lookup_stid(cdev->tids, stid);
+	if (!data) {
+		/* listening server close */
+		kfree_skb(skb);
+		goto unlock;
+	}
 	lsk = ((struct listen_ctx *)data)->lsk;
 	bh_lock_sock(lsk);
@@ -1997,39 +2010,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
 	spin_unlock_bh(&cdev->deferq.lock);
 }
 
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
-			   struct chtls_dev *cdev, int status, int queue)
-{
-	struct cpl_abort_req_rss *req = cplhdr(skb);
-	struct sk_buff *reply_skb;
-	struct chtls_sock *csk;
-
-	csk = rcu_dereference_sk_user_data(sk);
-
-	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
-			      GFP_KERNEL);
-
-	if (!reply_skb) {
-		req->status = (queue << 1);
-		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
-		return;
-	}
-
-	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
-	kfree_skb(skb);
-
-	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
-	if (csk_conn_inline(csk)) {
-		struct l2t_entry *e = csk->l2t_entry;
-
-		if (e && sk->sk_state != TCP_SYN_RECV) {
-			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
-			return;
-		}
-	}
-	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
 				 struct chtls_dev *cdev,
 				 int status, int queue)
@@ -2078,9 +2058,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
 	queue = csk->txq_idx;
 
 	skb->sk	= NULL;
+	chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+			     CPL_ABORT_NO_RST, queue);
 	do_abort_syn_rcv(child, lsk);
-	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
-		       CPL_ABORT_NO_RST, queue);
 }
 
 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2090,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
 	if (!sock_owned_by_user(psk)) {
 		int queue = csk->txq_idx;
 
+		chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
 		do_abort_syn_rcv(sk, psk);
-		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
 	} else {
 		skb->sk	= sk;
 		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2109,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
 	int queue = csk->txq_idx;
 
 	if (is_neg_adv(req->status)) {
-		if (sk->sk_state == TCP_SYN_RECV)
-			chtls_set_tcb_tflag(sk, 0, 0);
-
 		kfree_skb(skb);
 		return;
 	}
@@ -2158,12 +2135,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
 			return;
 
-		chtls_release_resources(sk);
-		chtls_conn_done(sk);
 	}
 
 	chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
 			     rst_status, queue);
+
+	chtls_release_resources(sk);
+	chtls_conn_done(sk);
 }
 
 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)