Commit be4c9bad
Authored May 05, 2010 by Roland Dreier
MAINTAINERS: Add cxgb4 and iw_cxgb4 entries
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Parent: cfdda9d7
Showing 7 changed files with 238 additions and 164 deletions (+238 / -164):
MAINTAINERS                              +14    -0
drivers/infiniband/hw/cxgb4/cm.c        +180  -136
drivers/infiniband/hw/cxgb4/ev.c          +3    -3
drivers/infiniband/hw/cxgb4/iw_cxgb4.h    +2    -0
drivers/infiniband/hw/cxgb4/provider.c    +2    -2
drivers/infiniband/hw/cxgb4/qp.c         +14   -14
drivers/infiniband/hw/cxgb4/t4.h         +23    -9
MAINTAINERS

@@ -1719,6 +1719,20 @@ W: http://www.openfabrics.org
 S:      Supported
 F:      drivers/infiniband/hw/cxgb3/
 
+CXGB4 ETHERNET DRIVER (CXGB4)
+M:      Dimitris Michailidis <dm@chelsio.com>
+L:      netdev@vger.kernel.org
+W:      http://www.chelsio.com
+S:      Supported
+F:      drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M:      Steve Wise <swise@chelsio.com>
+L:      linux-rdma@vger.kernel.org
+W:      http://www.openfabrics.org
+S:      Supported
+F:      drivers/infiniband/hw/cxgb4/
+
 CYBERPRO FB DRIVER
 M:      Russell King <linux@arm.linux.org.uk>
 L:      linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
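
(For readers unfamiliar with the MAINTAINERS entry format: per the file's own legend, M: is the maintainer contact, L: the mailing list for patches, W: a web page, S: the support status, and F: the file patterns the entry covers.)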
drivers/infiniband/hw/cxgb4/cm.c

@@ -61,6 +61,10 @@ static char *states[] = {
        NULL,
 };
 
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
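
Since the new limit is registered with module_param(..., 0644), it can presumably be set at module load time (e.g. modprobe iw_cxgb4 c4iw_max_read_depth=16) and read back under /sys/module/iw_cxgb4/parameters/. It replaces the hard-coded T4_MAX_READ_DEPTH checks removed elsewhere in this diff.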

@@ -113,18 +117,17 @@ static int snd_win = 32 * 1024;
 module_param(snd_win, int, 0644);
 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
-c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
        PDBG("%s ep %p\n", __func__, ep);

@@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep)
        c4iw_put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-       struct sk_buff *skb = NULL;
-       struct c4iw_dev *dev;
-       struct cpl_act_establish *rpl = cplhdr(skb);
-       unsigned int opcode;
-       int ret;
-
-       while ((skb = skb_dequeue(&rxq))) {
-               rpl = cplhdr(skb);
-               dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
-               opcode = rpl->ot.opcode;
-
-               BUG_ON(!work_handlers[opcode]);
-               ret = work_handlers[opcode](dev, skb);
-               if (!ret)
-                       kfree_skb(skb);
-       }
-}
-
 static int status2errno(int status)
 {
        switch (status) {

@@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
-static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
-{
-       struct cpl_fw6_msg *rpl = cplhdr(skb);
-       struct c4iw_wr_wait *wr_waitp;
-       int ret;
-
-       PDBG("%s type %u\n", __func__, rpl->type);
-
-       switch (rpl->type) {
-       case 1:
-               ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-               wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
-               PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-               if (wr_waitp) {
-                       wr_waitp->ret = ret;
-                       wr_waitp->done = 1;
-                       wake_up(&wr_waitp->wait);
-               }
-               break;
-       case 2:
-               c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
-               break;
-       default:
-               printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-                      rpl->type);
-               break;
-       }
-       return 0;
-}
-
-static void ep_timeout(unsigned long arg)
-{
-       struct c4iw_ep *ep = (struct c4iw_ep *)arg;
-       struct c4iw_qp_attributes attrs;
-       unsigned long flags;
-       int abort = 1;
-
-       spin_lock_irqsave(&ep->com.lock, flags);
-       PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-            ep->com.state);
-       switch (ep->com.state) {
-       case MPA_REQ_SENT:
-               __state_set(&ep->com, ABORTING);
-               connect_reply_upcall(ep, -ETIMEDOUT);
-               break;
-       case MPA_REQ_WAIT:
-               __state_set(&ep->com, ABORTING);
-               break;
-       case CLOSING:
-       case MORIBUND:
-               if (ep->com.cm_id && ep->com.qp) {
-                       attrs.next_state = C4IW_QP_STATE_ERROR;
-                       c4iw_modify_qp(ep->com.qp->rhp,
-                                    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
-                                    &attrs, 1);
-               }
-               __state_set(&ep->com, ABORTING);
-               break;
-       default:
-               printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
-                       __func__, ep, ep->hwtid, ep->com.state);
-               WARN_ON(1);
-               abort = 0;
-       }
-       spin_unlock_irqrestore(&ep->com.lock, flags);
-       if (abort)
-               abort_connection(ep, NULL, GFP_ATOMIC);
-       c4iw_put_ep(&ep->com);
-}
-
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
        int err;

@@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);
 
-       if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
-           (conn_param->ird > T4_MAX_READ_DEPTH)) {
+       if ((conn_param->ord > c4iw_max_read_depth) ||
+           (conn_param->ird > c4iw_max_read_depth)) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;

@@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct net_device *pdev;
        int step;
 
+       if ((conn_param->ord > c4iw_max_read_depth) ||
+           (conn_param->ird > c4iw_max_read_depth)) {
+               err = -EINVAL;
+               goto out;
+       }
        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);

@@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
         */
        ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
        if (ep->stid == -1) {
-               printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+               printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

@@ -2243,6 +2161,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
        return ret;
 }
 
+/*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+       [CPL_ACT_ESTABLISH] = act_establish,
+       [CPL_ACT_OPEN_RPL] = act_open_rpl,
+       [CPL_RX_DATA] = rx_data,
+       [CPL_ABORT_RPL_RSS] = abort_rpl,
+       [CPL_ABORT_RPL] = abort_rpl,
+       [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+       [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+       [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+       [CPL_PASS_ESTABLISH] = pass_establish,
+       [CPL_PEER_CLOSE] = peer_close,
+       [CPL_ABORT_REQ_RSS] = peer_abort,
+       [CPL_CLOSE_CON_RPL] = close_con_rpl,
+       [CPL_RDMA_TERMINATE] = terminate,
+       [CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+       struct c4iw_qp_attributes attrs;
+       int abort = 1;
+
+       spin_lock_irq(&ep->com.lock);
+       PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+            ep->com.state);
+       switch (ep->com.state) {
+       case MPA_REQ_SENT:
+               __state_set(&ep->com, ABORTING);
+               connect_reply_upcall(ep, -ETIMEDOUT);
+               break;
+       case MPA_REQ_WAIT:
+               __state_set(&ep->com, ABORTING);
+               break;
+       case CLOSING:
+       case MORIBUND:
+               if (ep->com.cm_id && ep->com.qp) {
+                       attrs.next_state = C4IW_QP_STATE_ERROR;
+                       c4iw_modify_qp(ep->com.qp->rhp,
+                                    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+                                    &attrs, 1);
+               }
+               __state_set(&ep->com, ABORTING);
+               break;
+       default:
+               printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+                       __func__, ep, ep->hwtid, ep->com.state);
+               WARN_ON(1);
+               abort = 0;
+       }
+       spin_unlock_irq(&ep->com.lock);
+       if (abort)
+               abort_connection(ep, NULL, GFP_KERNEL);
+       c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+       struct c4iw_ep *ep;
+
+       spin_lock_irq(&timeout_lock);
+       while (!list_empty(&timeout_list)) {
+               struct list_head *tmp;
+
+               tmp = timeout_list.next;
+               list_del(tmp);
+               spin_unlock_irq(&timeout_lock);
+               ep = list_entry(tmp, struct c4iw_ep, entry);
+               process_timeout(ep);
+               spin_lock_irq(&timeout_lock);
+       }
+       spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+       struct sk_buff *skb = NULL;
+       struct c4iw_dev *dev;
+       struct cpl_act_establish *rpl = cplhdr(skb);
+       unsigned int opcode;
+       int ret;
+
+       while ((skb = skb_dequeue(&rxq))) {
+               rpl = cplhdr(skb);
+               dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+               opcode = rpl->ot.opcode;
+
+               BUG_ON(!work_handlers[opcode]);
+               ret = work_handlers[opcode](dev, skb);
+               if (!ret)
+                       kfree_skb(skb);
+       }
+       process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+       struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+       spin_lock(&timeout_lock);
+       list_add_tail(&ep->entry, &timeout_list);
+       spin_unlock(&timeout_lock);
+       queue_work(workq, &skb_work);
+}
+
 /*
  * All the CM events are handled on a work queue to have a safe context.
  */
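
Note how process_timedout_eps() above drops timeout_lock around each process_timeout() call: the per-endpoint work takes other locks (ep->com.lock) and may sleep, so it must not run under the spinlock. A minimal user-space sketch of that same "detach under the lock, process outside it" drain pattern, with a pthread mutex standing in for the spinlock (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;   /* singly linked pending list */

static void process_one(struct node *n)
{
        /* May block or take other locks, so it must run unlocked. */
        printf("processing %d\n", n->id);
}

static void drain(void)
{
        pthread_mutex_lock(&lock);
        while (head) {
                struct node *n = head;
                head = n->next;              /* detach under the lock  */
                pthread_mutex_unlock(&lock); /* ...but process outside */
                process_one(n);
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct node b = { NULL, 2 }, a = { &b, 1 };
        head = &a;
        drain();
        return 0;
}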

@@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+       struct cpl_fw6_msg *rpl = cplhdr(skb);
+       struct c4iw_wr_wait *wr_waitp;
+       int ret;
+
+       PDBG("%s type %u\n", __func__, rpl->type);
+
+       switch (rpl->type) {
+       case 1:
+               ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+               wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+               PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+               if (wr_waitp) {
+                       wr_waitp->ret = ret;
+                       wr_waitp->done = 1;
+                       wake_up(&wr_waitp->wait);
+               }
+               break;
+       case 2:
+               c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+               break;
+       default:
+               printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+                      rpl->type);
+               break;
+       }
+       return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+       [CPL_ACT_ESTABLISH] = sched,
+       [CPL_ACT_OPEN_RPL] = sched,
+       [CPL_RX_DATA] = sched,
+       [CPL_ABORT_RPL_RSS] = sched,
+       [CPL_ABORT_RPL] = sched,
+       [CPL_PASS_OPEN_RPL] = sched,
+       [CPL_CLOSE_LISTSRV_RPL] = sched,
+       [CPL_PASS_ACCEPT_REQ] = sched,
+       [CPL_PASS_ESTABLISH] = sched,
+       [CPL_PEER_CLOSE] = sched,
+       [CPL_CLOSE_CON_RPL] = sched,
+       [CPL_ABORT_REQ_RSS] = sched,
+       [CPL_RDMA_TERMINATE] = sched,
+       [CPL_FW4_ACK] = sched,
+       [CPL_SET_TCB_RPL] = set_tcb_rpl,
+       [CPL_FW6_MSG] = fw6_msg
+};
+
 int __init c4iw_cm_init(void)
 {
+       spin_lock_init(&timeout_lock);
        skb_queue_head_init(&rxq);
 
        workq = create_singlethread_workqueue("iw_cxgb4");
        if (!workq)
                return -ENOMEM;
 
-       /*
-        * Most upcalls from the T4 Core go to sched() to
-        * schedule the processing on a work queue.
-        */
-       c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
-       c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
-       c4iw_handlers[CPL_RX_DATA] = sched;
-       c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
-       c4iw_handlers[CPL_ABORT_RPL] = sched;
-       c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
-       c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-       c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-       c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
-       c4iw_handlers[CPL_PEER_CLOSE] = sched;
-       c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
-       c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
-       c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
-       c4iw_handlers[CPL_FW4_ACK] = sched;
-       c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-       c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
-
-       /*
-        * These are the real handlers that are called from a
-        * work queue.
-        */
-       work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-       work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-       work_handlers[CPL_RX_DATA] = rx_data;
-       work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-       work_handlers[CPL_ABORT_RPL] = abort_rpl;
-       work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-       work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-       work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-       work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-       work_handlers[CPL_PEER_CLOSE] = peer_close;
-       work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-       work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-       work_handlers[CPL_RDMA_TERMINATE] = terminate;
-       work_handlers[CPL_FW4_ACK] = fw4_ack;
-
        return 0;
 }
 
 void __exit c4iw_cm_term(void)
 {
+       WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
        destroy_workqueue(workq);
 }
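
The commit thus replaces the per-opcode assignments that c4iw_cm_init() used to perform at runtime with designated-initializer tables fixed at compile time. As a rough illustration of that table-driven dispatch pattern (this is not the driver's code; the opcodes, handler names, and buffer type are made up for the sketch):

#include <stdio.h>

/* Hypothetical message buffer and opcode space. */
struct msg { int opcode; };
enum { OP_OPEN, OP_DATA, OP_CLOSE, NUM_OPS };

/* Handlers return nonzero when the caller may free the buffer. */
typedef int (*handler_func)(struct msg *m);

static int do_open(struct msg *m)  { (void)m; printf("open\n");  return 1; }
static int do_data(struct msg *m)  { (void)m; printf("data\n");  return 1; }
static int do_close(struct msg *m) { (void)m; printf("close\n"); return 1; }

/* Designated initializers bind opcode -> handler at compile time;
 * unlisted slots stay NULL, so the dispatcher must check for that,
 * much as process_work() does with BUG_ON(!work_handlers[opcode]). */
static handler_func handlers[NUM_OPS] = {
        [OP_OPEN]  = do_open,
        [OP_DATA]  = do_data,
        [OP_CLOSE] = do_close,
};

static void dispatch(struct msg *m)
{
        if (m->opcode < 0 || m->opcode >= NUM_OPS || !handlers[m->opcode])
                return; /* no handler registered */
        handlers[m->opcode](m);
}

int main(void)
{
        struct msg m = { .opcode = OP_DATA };
        dispatch(&m);
        return 0;
}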

drivers/infiniband/hw/cxgb4/ev.c

@@ -51,8 +51,8 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
                return;
        }
 
-       printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-              "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
+       printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+              "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
               CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
               CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
               CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
-                              &attrs, 0);
+                              &attrs, 1);
        }
 
        event.event = ib_event;

drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -597,6 +597,7 @@ struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
+       struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;

@@ -739,5 +740,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
 
 #endif

drivers/infiniband/hw/cxgb4/provider.c

@@ -267,8 +267,8 @@ static int c4iw_query_device(struct ib_device *ibdev,
        props->max_qp_wr = T4_MAX_QP_DEPTH;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
-       props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
-       props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
+       props->max_qp_rd_atom = c4iw_max_read_depth;
+       props->max_qp_init_rd_atom = c4iw_max_read_depth;
        props->max_cq = T4_MAX_NUM_CQ;
        props->max_cqe = T4_MAX_CQ_DEPTH;
        props->max_mr = c4iw_num_stags(&dev->rdev);

drivers/infiniband/hw/cxgb4/qp.c

@@ -856,7 +856,8 @@ int c4iw_post_zb_read(struct c4iw_qp *qhp)
        return c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+                          gfp_t gfp)
 {
        struct fw_ri_wr *wqe;
        struct sk_buff *skb;

@@ -865,9 +866,9 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);
 
-       skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+       skb = alloc_skb(sizeof *wqe, gfp);
        if (!skb)
-               return -ENOMEM;
+               return;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));

@@ -881,7 +882,7 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
        wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
        build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
-       return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+       c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
 /*

@@ -1130,14 +1131,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
                newattr.enable_bind = attrs->enable_bind;
        if (mask & C4IW_QP_ATTR_MAX_ORD) {
-               if (attrs->max_ord > T4_MAX_READ_DEPTH) {
+               if (attrs->max_ord > c4iw_max_read_depth) {
                        ret = -EINVAL;
                        goto out;
                }
                newattr.max_ord = attrs->max_ord;
        }
        if (mask & C4IW_QP_ATTR_MAX_IRD) {
-               if (attrs->max_ird > T4_MAX_READ_DEPTH) {
+               if (attrs->max_ird > c4iw_max_read_depth) {
                        ret = -EINVAL;
                        goto out;
                }

@@ -1215,12 +1216,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.state = C4IW_QP_STATE_TERMINATE;
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
-                       if (!internal) {
-                               ep = qhp->ep;
-                               c4iw_get_ep(&ep->com);
-                               terminate = 1;
-                               disconnect = 1;
-                       }
+                       ep = qhp->ep;
+                       c4iw_get_ep(&ep->com);
+                       terminate = 1;
+                       disconnect = 1;
                        break;
                case C4IW_QP_STATE_ERROR:
                        qhp->attr.state = C4IW_QP_STATE_ERROR;

@@ -1301,7 +1300,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        spin_unlock_irqrestore(&qhp->lock, flag);
 
        if (terminate)
-               c4iw_post_terminate(qhp, NULL);
+               post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
 
        /*
         * If disconnect is 1, then we need to initiate a disconnect

@@ -1309,7 +1308,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect) {
-               c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
+               c4iw_ep_disconnect(ep, abort,
+                                  internal ? GFP_ATOMIC : GFP_KERNEL);
                c4iw_put_ep(&ep->com);
        }
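
A caller of c4iw_modify_qp() with internal set may be running in atomic context, which is presumably why the last two hunks thread internal ? GFP_ATOMIC : GFP_KERNEL down into post_terminate() and c4iw_ep_disconnect(): GFP_KERNEL allocations may sleep, while GFP_ATOMIC ones never do. Note also that post_terminate() drops __GFP_NOFAIL and now simply returns if the allocation fails.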

drivers/infiniband/hw/cxgb4/t4.h

@@ -36,7 +36,6 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_READ_DEPTH 16
 #define T4_QID_BASE 1024
 #define T4_MAX_QIDS 256
 #define T4_MAX_NUM_QP (1<<16)

@@ -450,11 +449,25 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
        u32 val;
+       u16 inc;
 
-       val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-             INGRESSQID(cq->cqid);
-       cq->cidx_inc = 0;
-       writel(val, cq->gts);
+       do {
+               /*
+                * inc must be less the both the max update value -and-
+                * the size of the CQ.
+                */
+               inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
+                     CIDXINC_MASK;
+               inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
+               if (inc == cq->cidx_inc)
+                       val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
+                             INGRESSQID(cq->cqid);
+               else
+                       val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
+                             INGRESSQID(cq->cqid);
+               cq->cidx_inc -= inc;
+               writel(val, cq->gts);
+       } while (cq->cidx_inc);
        return 0;
 }

@@ -489,11 +502,12 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
        int ret = 0;
+       u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
 
-       if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+       if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
                *cqe = &cq->queue[cq->cidx];
-               cq->timestamp = CQE_TS(*cqe);
-       } else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
+               cq->timestamp = G_CQE_TS(bits_type_ts);
+       } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
                ret = -EOVERFLOW;
        else
                ret = -ENODATA;
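
The new t4_arm_cq() drains a large cidx_inc in hardware-sized chunks instead of writing it to the doorbell in one go. A standalone sketch of just that clamping arithmetic (the mask value and writel stub below are illustrative assumptions, not the real register layout):

#include <stdio.h>

#define CIDXINC_MASK 0x7ff  /* assumed max per-write increment */

static void writel_stub(unsigned int inc)
{
        printf("doorbell write: CIDXINC=%u\n", inc);
}

/* Drain cidx_inc in chunks no larger than the doorbell field
 * allows and no larger than the queue itself, as t4_arm_cq does. */
static void arm_cq(unsigned int cidx_inc, unsigned int cq_size)
{
        unsigned int inc;

        do {
                inc = cidx_inc <= CIDXINC_MASK ? cidx_inc : CIDXINC_MASK;
                inc = inc <= (cq_size - 1) ? inc : (cq_size - 1);
                cidx_inc -= inc;
                writel_stub(inc);
        } while (cidx_inc);
}

int main(void)
{
        arm_cq(5000, 1024);  /* drains as 1023 + 1023 + 1023 + 1023 + 908 */
        return 0;
}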