Commit 963cab50 authored by Hariprasad S and committed by Doug Ledford

iw_cxgb4: Adds support for T6 adapter

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Parent 27999805
@@ -632,22 +632,18 @@ static void best_mtu(const unsigned short *mtus, unsigned short mtu,
 static int send_connect(struct c4iw_ep *ep)
 {
-	struct cpl_act_open_req *req;
-	struct cpl_t5_act_open_req *t5_req;
-	struct cpl_act_open_req6 *req6;
-	struct cpl_t5_act_open_req6 *t5_req6;
+	struct cpl_act_open_req *req = NULL;
+	struct cpl_t5_act_open_req *t5req = NULL;
+	struct cpl_t6_act_open_req *t6req = NULL;
+	struct cpl_act_open_req6 *req6 = NULL;
+	struct cpl_t5_act_open_req6 *t5req6 = NULL;
+	struct cpl_t6_act_open_req6 *t6req6 = NULL;
 	struct sk_buff *skb;
 	u64 opt0;
 	u32 opt2;
 	unsigned int mtu_idx;
 	int wscale;
-	int wrlen;
-	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
-				sizeof(struct cpl_act_open_req) :
-				sizeof(struct cpl_t5_act_open_req);
-	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
-				sizeof(struct cpl_act_open_req6) :
-				sizeof(struct cpl_t5_act_open_req6);
+	int win, sizev4, sizev6, wrlen;
 	struct sockaddr_in *la = (struct sockaddr_in *)
 				 &ep->com.mapped_local_addr;
 	struct sockaddr_in *ra = (struct sockaddr_in *)
 				 &ep->com.mapped_remote_addr;
@@ -656,8 +652,28 @@ static int send_connect(struct c4iw_ep *ep)
 				   &ep->com.mapped_local_addr;
 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
 				   &ep->com.mapped_remote_addr;
-	int win;
 	int ret;
+	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+	u32 isn = (prandom_u32() & ~7UL) - 1;
+
+	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+	case CHELSIO_T4:
+		sizev4 = sizeof(struct cpl_act_open_req);
+		sizev6 = sizeof(struct cpl_act_open_req6);
+		break;
+	case CHELSIO_T5:
+		sizev4 = sizeof(struct cpl_t5_act_open_req);
+		sizev6 = sizeof(struct cpl_t5_act_open_req6);
+		break;
+	case CHELSIO_T6:
+		sizev4 = sizeof(struct cpl_t6_act_open_req);
+		sizev6 = sizeof(struct cpl_t6_act_open_req6);
+		break;
+	default:
+		pr_err("T%d Chip is not supported\n",
+		       CHELSIO_CHIP_VERSION(adapter_type));
+		return -EINVAL;
+	}
 
 	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
 			roundup(sizev4, 16) :
@@ -706,7 +722,10 @@ static int send_connect(struct c4iw_ep *ep)
 		opt2 |= SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN_F;
-	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
+		if (peer2peer)
+			isn += 4;
+
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
 		opt2 |= T5_ISS_F;
@@ -718,102 +737,109 @@ static int send_connect(struct c4iw_ep *ep)
 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
-	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
-		if (ep->com.remote_addr.ss_family == AF_INET) {
-			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+	if (ep->com.remote_addr.ss_family == AF_INET) {
+		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+		case CHELSIO_T4:
+			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
 			INIT_TP_WR(req, 0);
-			OPCODE_TID(req) = cpu_to_be32(
-					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
-					((ep->rss_qid << 14) | ep->atid)));
-			req->local_port = la->sin_port;
-			req->peer_port = ra->sin_port;
-			req->local_ip = la->sin_addr.s_addr;
-			req->peer_ip = ra->sin_addr.s_addr;
-			req->opt0 = cpu_to_be64(opt0);
+			break;
+		case CHELSIO_T5:
+			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
+					wrlen);
+			INIT_TP_WR(t5req, 0);
+			req = (struct cpl_act_open_req *)t5req;
+			break;
+		case CHELSIO_T6:
+			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
+					wrlen);
+			INIT_TP_WR(t6req, 0);
+			req = (struct cpl_act_open_req *)t6req;
+			t5req = (struct cpl_t5_act_open_req *)t6req;
+			break;
+		default:
+			pr_err("T%d Chip is not supported\n",
+			       CHELSIO_CHIP_VERSION(adapter_type));
+			ret = -EINVAL;
+			goto clip_release;
+		}
+
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+					((ep->rss_qid<<14) | ep->atid)));
+		req->local_port = la->sin_port;
+		req->peer_port = ra->sin_port;
+		req->local_ip = la->sin_addr.s_addr;
+		req->peer_ip = ra->sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+
+		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
 			req->params = cpu_to_be32(cxgb4_select_ntuple(
 						ep->com.dev->rdev.lldi.ports[0],
 						ep->l2t));
 			req->opt2 = cpu_to_be32(opt2);
 		} else {
+			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
+						cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t)));
+			t5req->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
+			t5req->opt2 = cpu_to_be32(opt2);
+		}
+	} else {
+		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+		case CHELSIO_T4:
 			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
 			INIT_TP_WR(req6, 0);
-			OPCODE_TID(req6) = cpu_to_be32(
-					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
-					((ep->rss_qid<<14)|ep->atid)));
-			req6->local_port = la6->sin6_port;
-			req6->peer_port = ra6->sin6_port;
-			req6->local_ip_hi = *((__be64 *)
-						(la6->sin6_addr.s6_addr));
-			req6->local_ip_lo = *((__be64 *)
-						(la6->sin6_addr.s6_addr + 8));
-			req6->peer_ip_hi = *((__be64 *)
-						(ra6->sin6_addr.s6_addr));
-			req6->peer_ip_lo = *((__be64 *)
-						(ra6->sin6_addr.s6_addr + 8));
-			req6->opt0 = cpu_to_be64(opt0);
+			break;
+		case CHELSIO_T5:
+			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
+					wrlen);
+			INIT_TP_WR(t5req6, 0);
+			req6 = (struct cpl_act_open_req6 *)t5req6;
+			break;
+		case CHELSIO_T6:
+			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
+					wrlen);
+			INIT_TP_WR(t6req6, 0);
+			req6 = (struct cpl_act_open_req6 *)t6req6;
+			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
+			break;
+		default:
+			pr_err("T%d Chip is not supported\n",
+			       CHELSIO_CHIP_VERSION(adapter_type));
+			ret = -EINVAL;
+			goto clip_release;
+		}
+
+		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+					((ep->rss_qid<<14)|ep->atid)));
+		req6->local_port = la6->sin6_port;
+		req6->peer_port = ra6->sin6_port;
+		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
+		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
+		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
+		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
+		req6->opt0 = cpu_to_be64(opt0);
+
+		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
 			req6->params = cpu_to_be32(cxgb4_select_ntuple(
 						ep->com.dev->rdev.lldi.ports[0],
 						ep->l2t));
 			req6->opt2 = cpu_to_be32(opt2);
-		}
-	} else {
-		u32 isn = (prandom_u32() & ~7UL) - 1;
-
-		if (peer2peer)
-			isn += 4;
-
-		if (ep->com.remote_addr.ss_family == AF_INET) {
-			t5_req = (struct cpl_t5_act_open_req *)
-				 skb_put(skb, wrlen);
-			INIT_TP_WR(t5_req, 0);
-			OPCODE_TID(t5_req) = cpu_to_be32(
-					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
-					((ep->rss_qid << 14) | ep->atid)));
-			t5_req->local_port = la->sin_port;
-			t5_req->peer_port = ra->sin_port;
-			t5_req->local_ip = la->sin_addr.s_addr;
-			t5_req->peer_ip = ra->sin_addr.s_addr;
-			t5_req->opt0 = cpu_to_be64(opt0);
-			t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
-						     cxgb4_select_ntuple(
-					     ep->com.dev->rdev.lldi.ports[0],
-					     ep->l2t)));
-			t5_req->rsvd = cpu_to_be32(isn);
-			PDBG("%s snd_isn %u\n", __func__,
-			     be32_to_cpu(t5_req->rsvd));
-			t5_req->opt2 = cpu_to_be32(opt2);
 		} else {
-			t5_req6 = (struct cpl_t5_act_open_req6 *)
-				  skb_put(skb, wrlen);
-			INIT_TP_WR(t5_req6, 0);
-			OPCODE_TID(t5_req6) = cpu_to_be32(
-					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
-					((ep->rss_qid<<14)|ep->atid)));
-			t5_req6->local_port = la6->sin6_port;
-			t5_req6->peer_port = ra6->sin6_port;
-			t5_req6->local_ip_hi = *((__be64 *)
-						(la6->sin6_addr.s6_addr));
-			t5_req6->local_ip_lo = *((__be64 *)
-						(la6->sin6_addr.s6_addr + 8));
-			t5_req6->peer_ip_hi = *((__be64 *)
-						(ra6->sin6_addr.s6_addr));
-			t5_req6->peer_ip_lo = *((__be64 *)
-						(ra6->sin6_addr.s6_addr + 8));
-			t5_req6->opt0 = cpu_to_be64(opt0);
-			t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
-							cxgb4_select_ntuple(
-						ep->com.dev->rdev.lldi.ports[0],
-						ep->l2t)));
-			t5_req6->rsvd = cpu_to_be32(isn);
-			PDBG("%s snd_isn %u\n", __func__,
-			     be32_to_cpu(t5_req6->rsvd));
-			t5_req6->opt2 = cpu_to_be32(opt2);
+			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
+						cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t)));
+			t5req6->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
+			t5req6->opt2 = cpu_to_be32(opt2);
 		}
 	}
 
 	set_bit(ACT_OPEN_REQ, &ep->com.history);
 	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+clip_release:
 	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
 				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
@@ -1902,7 +1928,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 
 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		     struct dst_entry *dst, struct c4iw_dev *cdev,
-		     bool clear_mpa_v1)
+		     bool clear_mpa_v1, enum chip_type adapter_type)
 {
 	struct neighbour *n;
 	int err, step;
...@@ -1937,7 +1963,8 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, ...@@ -1937,7 +1963,8 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
goto out; goto out;
ep->mtu = pdev->mtu; ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev); ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
cxgb4_port_viid(pdev));
step = cdev->rdev.lldi.ntxq / step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan; cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step; ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1956,7 +1983,8 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 			goto out;
 		ep->mtu = dst_mtu(dst);
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
+						cxgb4_port_viid(pdev));
 		step = cdev->rdev.lldi.ntxq /
 			cdev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -2029,7 +2057,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
-	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
+	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
+			ep->com.dev->rdev.lldi.adapter_type);
 	if (err) {
 		pr_err("%s - cannot alloc l2e.\n", __func__);
 		goto fail4;
@@ -2217,13 +2246,14 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	int wscale;
 	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
 	int win;
+	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	BUG_ON(skb_cloned(skb));
 
 	skb_get(skb);
 	rpl = cplhdr(skb);
-	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+	if (!is_t4(adapter_type)) {
 		skb_trim(skb, roundup(sizeof(*rpl5), 16));
 		rpl5 = (void *)rpl;
 		INIT_TP_WR(rpl5, ep->hwtid);
@@ -2270,12 +2300,16 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		const struct tcphdr *tcph;
 		u32 hlen = ntohl(req->hdr_len);
 
-		tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
-			IP_HDR_LEN_G(hlen);
+		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
+			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+				IP_HDR_LEN_G(hlen);
+		else
+			tcph = (const void *)(req + 1) +
+				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
 		if (tcph->ece && tcph->cwr)
 			opt2 |= CCTRL_ECN_V(1);
 	}
-	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
 		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
@@ -2306,12 +2340,16 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
 	return;
 }
 
-static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
-		       __u8 *local_ip, __u8 *peer_ip,
+static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
+		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
 		       __be16 *local_port, __be16 *peer_port)
 {
-	int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-	int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
 	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
 	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
 	struct tcphdr *tcp = (struct tcphdr *)
@@ -2366,7 +2404,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
+	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
+		   local_ip, peer_ip, &local_port, &peer_port);
 
 	/* Find output route */
 	if (iptype == 4) {
@@ -2401,7 +2440,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
+	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
+			parent_ep->com.dev->rdev.lldi.adapter_type);
 	if (err) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
 		       __func__);
@@ -3193,7 +3233,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		goto fail2;
 	}
 
-	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
+	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
+			ep->com.dev->rdev.lldi.adapter_type);
 	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 		goto fail3;
@@ -3601,20 +3642,23 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 
 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 {
-	u32 l2info;
-	u16 vlantag, len, hdr_len, eth_hdr_len;
+	__be32 l2info;
+	__be16 hdr_len, vlantag, len;
+	u16 eth_hdr_len;
+	int tcp_hdr_len, ip_hdr_len;
 	u8 intf;
 	struct cpl_rx_pkt *cpl = cplhdr(skb);
 	struct cpl_pass_accept_req *req;
 	struct tcp_options_received tmp_opt;
 	struct c4iw_dev *dev;
+	enum chip_type type;
 
 	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
 	/* Store values from cpl_rx_pkt in temporary location. */
-	vlantag = (__force u16) cpl->vlan;
-	len = (__force u16) cpl->len;
-	l2info = (__force u32) cpl->l2info;
-	hdr_len = (__force u16) cpl->hdr_len;
+	vlantag = cpl->vlan;
+	len = cpl->len;
+	l2info = cpl->l2info;
+	hdr_len = cpl->hdr_len;
 	intf = cpl->iff;
 
 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -3631,20 +3675,28 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	memset(req, 0, sizeof(*req));
 	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
 			 SYN_MAC_IDX_V(RX_MACIDX_G(
-			 (__force int) htonl(l2info))) |
+			 be32_to_cpu(l2info))) |
 			 SYN_XACT_MATCH_F);
-	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-			    RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
-			    RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
-	req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
-					(__force int) htonl(l2info))) |
-				   TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
-					(__force int) htons(hdr_len))) |
-				   IP_HDR_LEN_V(RX_IPHDR_LEN_G(
-					(__force int) htons(hdr_len))) |
-				   ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
-	req->vlan = (__force __be16) vlantag;
-	req->len = (__force __be16) len;
+	type = dev->rdev.lldi.adapter_type;
+	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
+	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
+	req->hdr_len =
+		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
+	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
+		eth_hdr_len = is_t4(type) ?
+			      RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
+			      RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
+		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
+					    IP_HDR_LEN_V(ip_hdr_len) |
+					    ETH_HDR_LEN_V(eth_hdr_len));
+	} else { /* T6 and later */
+		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
+		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
+					    T6_IP_HDR_LEN_V(ip_hdr_len) |
+					    T6_ETH_HDR_LEN_V(eth_hdr_len));
+	}
+	req->vlan = vlantag;
+	req->len = len;
 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
 				    PASS_OPEN_TOS_V(tos));
 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -3763,9 +3815,22 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-		      RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
-		      RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
+	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
+	case CHELSIO_T4:
+		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+		break;
+	case CHELSIO_T5:
+		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+		break;
+	case CHELSIO_T6:
+		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+		break;
+	default:
+		pr_err("T%d Chip is not supported\n",
+		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
+		goto reject;
+	}
+
 	if (eth_hdr_len == ETH_HLEN) {
 		eh = (struct ethhdr *)(req + 1);
 		iph = (struct iphdr *)(eh + 1);
...
@@ -962,12 +962,12 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 		devp->rdev.lldi.sge_egrstatuspagesize;
 
 	/*
-	 * For T5 devices, we map all of BAR2 with WC.
+	 * For T5/T6 devices, we map all of BAR2 with WC.
 	 * For T4 devices with onchip qp mem, we map only that part
 	 * of BAR2 with WC.
 	 */
 	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
-	if (is_t5(devp->rdev.lldi.adapter_type)) {
+	if (!is_t4(devp->rdev.lldi.adapter_type)) {
 		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
 			pci_resource_len(devp->rdev.lldi.pdev, 2));
 		if (!devp->rdev.bar2_kva) {
@@ -1267,11 +1267,9 @@ static int enable_qp_db(int id, void *p, void *data)
 static void resume_rc_qp(struct c4iw_qp *qp)
 {
 	spin_lock(&qp->lock);
-	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
-		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
+	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
 	qp->wq.sq.wq_pidx_inc = 0;
-	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
-		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
+	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
 	qp->wq.rq.wq_pidx_inc = 0;
 	spin_unlock(&qp->lock);
 }
...
@@ -209,7 +209,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		if (addr >= rdev->oc_mw_pa)
 			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
 		else {
-			if (is_t5(rdev->lldi.adapter_type))
+			if (!is_t4(rdev->lldi.adapter_type))
 				vma->vm_page_prot =
 					t4_pgprot_wc(vma->vm_page_prot);
 			else
...
@@ -712,8 +712,7 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
 	spin_lock_irqsave(&qhp->rhp->lock, flags);
 	spin_lock(&qhp->lock);
 	if (qhp->rhp->db_state == NORMAL)
-		t4_ring_sq_db(&qhp->wq, inc,
-			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+		t4_ring_sq_db(&qhp->wq, inc, NULL);
 	else {
 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
 		qhp->wq.sq.wq_pidx_inc += inc;
@@ -730,8 +729,7 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 	spin_lock_irqsave(&qhp->rhp->lock, flags);
 	spin_lock(&qhp->lock);
 	if (qhp->rhp->db_state == NORMAL)
-		t4_ring_rq_db(&qhp->wq, inc,
-			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+		t4_ring_rq_db(&qhp->wq, inc, NULL);
 	else {
 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
 		qhp->wq.rq.wq_pidx_inc += inc;
@@ -817,7 +815,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			fw_opcode = FW_RI_FR_NSMR_WR;
 			swsqe->opcode = FW_RI_FAST_REGISTER;
 			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
-					    is_t5(
+					    !is_t4(
 					    qhp->rhp->rdev.lldi.adapter_type) ?
 					    1 : 0);
 			break;
@@ -860,8 +858,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
 	if (!qhp->rhp->rdev.status_page->db_off) {
-		t4_ring_sq_db(&qhp->wq, idx,
-			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+		t4_ring_sq_db(&qhp->wq, idx, wqe);
 		spin_unlock_irqrestore(&qhp->lock, flag);
 	} else {
 		spin_unlock_irqrestore(&qhp->lock, flag);
@@ -934,8 +931,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		num_wrs--;
 	}
 	if (!qhp->rhp->rdev.status_page->db_off) {
-		t4_ring_rq_db(&qhp->wq, idx,
-			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+		t4_ring_rq_db(&qhp->wq, idx, wqe);
 		spin_unlock_irqrestore(&qhp->lock, flag);
 	} else {
 		spin_unlock_irqrestore(&qhp->lock, flag);
@@ -1875,7 +1871,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		attrs.rq_db_inc = attr->rq_psn;
 		mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
 		mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
-		if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+		if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
 		    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
 			return -EINVAL;
...
@@ -455,8 +455,7 @@ static inline void pio_copy(u64 __iomem *dst, u64 *src)
 	}
 }
 
-static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
-				 union t4_wr *wqe)
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
 {
 
 	/* Flush host queue memory writes. */
@@ -482,7 +481,7 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
 	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }
 
-static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
 				 union t4_recv_wr *wqe)
 {