openeuler / Kernel
Commit 575d34b6
Authored on Nov 14, 2017 by Vinod Koul

    Merge branch 'topic/bcom' into for-linus

Parents: 049d0d38, 7076a1e4
Showing 2 changed files with 38 additions and 81 deletions (+38, -81):

    drivers/dma/Kconfig          +1  -1
    drivers/dma/bcm-sba-raid.c   +37 -80
drivers/dma/Kconfig

@@ -115,7 +115,7 @@ config BCM_SBA_RAID
         select DMA_ENGINE_RAID
         select ASYNC_TX_DISABLE_XOR_VAL_DMA
         select ASYNC_TX_DISABLE_PQ_VAL_DMA
-        default ARCH_BCM_IPROC
+        default m if ARCH_BCM_IPROC
         help
           Enable support for Broadcom SBA RAID Engine. The SBA RAID
           engine is available on most of the Broadcom iProc SoCs. It
...
drivers/dma/bcm-sba-raid.c

 /*
  * Copyright (C) 2017 Broadcom
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */

 /*
...
@@ -25,11 +30,8 @@
  *
  * The Broadcom SBA RAID driver does not require any register programming
  * except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
  * For having more SBA DMA channels, we can create more SBA device nodes
  * in Broadcom SoC specific DTS based on number of hardware rings supported
  * by Broadcom SoC ring manager.
...
@@ -85,6 +87,7 @@
 #define SBA_CMD_GALOIS                  0xe

 #define SBA_MAX_REQ_PER_MBOX_CHANNEL    8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL       8

 /* Driver helper macros */
 #define to_sba_request(tx)              \
...
@@ -142,9 +145,7 @@ struct sba_device {
         u32 max_cmds_pool_size;
         /* Maibox client and Mailbox channels */
         struct mbox_client client;
-        int mchans_count;
-        atomic_t mchans_current;
-        struct mbox_chan **mchans;
+        struct mbox_chan *mchan;
         struct device *mbox_dev;
         /* DMA device and DMA channel */
         struct dma_device dma_dev;
...
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)

 /* ====== General helper routines ===== */

-static void sba_peek_mchans(struct sba_device *sba)
-{
-        int mchan_idx;
-
-        for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-                mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
         bool found = false;
...
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
                  * would have completed which will create more
                  * room for new requests.
                  */
-                sba_peek_mchans(sba);
+                mbox_client_peek_data(sba->mchan);
                 return NULL;
         }
...
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
 static int sba_send_mbox_request(struct sba_device *sba,
                                  struct sba_request *req)
 {
-        int mchans_idx, ret = 0;
-
-        /* Select mailbox channel in round-robin fashion */
-        mchans_idx = atomic_inc_return(&sba->mchans_current);
-        mchans_idx = mchans_idx % sba->mchans_count;
+        int ret = 0;

         /* Send message for the request */
         req->msg.error = 0;
-        ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+        ret = mbox_send_message(sba->mchan, &req->msg);
         if (ret < 0) {
                 dev_err(sba->dev, "send message failed with error %d", ret);
                 return ret;
...
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
         }

         /* Signal txdone for mailbox channel */
-        mbox_client_txdone(sba->mchans[mchans_idx], ret);
+        mbox_client_txdone(sba->mchan, ret);

         return ret;
 }
...
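For context, the completion model relied on above is the generic Linux mailbox client API: because the driver sets client.knows_txdone = true, it must report completion itself with mbox_client_txdone() after a successful mbox_send_message(). A minimal sketch of that pattern, using hypothetical my_* names rather than this driver's symbols:

#include <linux/mailbox_client.h>

/*
 * Minimal sketch of the send-then-signal-txdone pattern used by
 * sba_send_mbox_request(); my_send() and its arguments are illustrative.
 */
static int my_send(struct mbox_chan *chan, void *msg)
{
        int ret;

        /* Queue the message on the (single) mailbox channel */
        ret = mbox_send_message(chan, msg);
        if (ret < 0)
                return ret;

        /* The client set knows_txdone = true, so it reports completion itself */
        mbox_client_txdone(chan, 0);

        return 0;
}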
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
         u32 count;
         struct sba_request *req;

-        /*
-         * Process few pending requests
-         *
-         * For now, we process (<number_of_mailbox_channels> * 8)
-         * number of requests at a time.
-         */
-        count = sba->mchans_count * 8;
+        /* Process few pending requests */
+        count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
         while (!list_empty(&sba->reqs_pending_list) && count) {
                 /* Get the first pending request */
                 req = list_first_entry(&sba->reqs_pending_list,
...
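The new constant simply caps how many pending requests one pass of the submit path may push to the single mailbox channel. As a generic illustration of the same bounded-drain idiom over a kernel linked list (not this driver's code; the my_* names and budget value are assumptions):

#include <linux/list.h>
#include <linux/types.h>

#define MY_BUDGET 8 /* plays the role of SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL */

struct my_req {
        struct list_head node;
};

/* Handle at most MY_BUDGET entries per call; caller holds the list lock */
static void my_drain(struct list_head *pending)
{
        u32 count = MY_BUDGET;
        struct my_req *req;

        while (!list_empty(pending) && count) {
                req = list_first_entry(pending, struct my_req, node);
                list_del_init(&req->node);
                /* ... submit req to the hardware here ... */
                count--;
        }
}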
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
                 WARN_ON(tx->cookie < 0);
                 if (tx->cookie > 0) {
+                        spin_lock_irqsave(&sba->reqs_lock, flags);
                         dma_cookie_complete(tx);
+                        spin_unlock_irqrestore(&sba->reqs_lock, flags);
                         dmaengine_desc_get_callback_invoke(tx, NULL);
                         dma_descriptor_unmap(tx);
                         tx->callback = NULL;
...
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
         if (ret == DMA_COMPLETE)
                 return ret;

-        sba_peek_mchans(sba);
+        mbox_client_peek_data(sba->mchan);

         return dma_cookie_status(dchan, cookie, txstate);
 }
...
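Only the middle of sba_tx_status() is visible in the hunk above; the surrounding shape is the usual dmaengine polling sequence. A hedged sketch of that shape follows (struct my_dev and my_tx_status() are illustrative stand-ins, and dma_cookie_status() comes from the drivers/dma local header, not the public API):

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include "dmaengine.h"  /* drivers/dma/dmaengine.h: dma_cookie_status() */

struct my_dev {
        struct dma_chan dma_chan;
        struct mbox_chan *mchan;
};

static enum dma_status my_tx_status(struct dma_chan *dchan,
                                    dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        enum dma_status ret;
        struct my_dev *d = container_of(dchan, struct my_dev, dma_chan);

        /* Fast path: the cookie may already be complete */
        ret = dma_cookie_status(dchan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        /* Ask the mailbox framework to process any received completions */
        mbox_client_peek_data(d->mchan);

        return dma_cookie_status(dchan, cookie, txstate);
}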
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)

 static int sba_probe(struct platform_device *pdev)
 {
-        int i, ret = 0, mchans_count;
+        int ret = 0;
         struct sba_device *sba;
         struct platform_device *mbox_pdev;
         struct of_phandle_args args;
...
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
         sba->dev = &pdev->dev;
         platform_set_drvdata(pdev, sba);

-        /* Number of channels equals number of mailbox channels */
+        /* Number of mailbox channels should be atleast 1 */
         ret = of_count_phandle_with_args(pdev->dev.of_node,
                                          "mboxes", "#mbox-cells");
         if (ret <= 0)
                 return -ENODEV;
-        mchans_count = ret;

         /* Determine SBA version from DT compatible string */
         if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
...
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
         default:
                 return -EINVAL;
         }
-        sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+        sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
         sba->max_cmd_per_req = sba->max_pq_srcs + 3;
         sba->max_xor_srcs = sba->max_cmd_per_req - 1;
         sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
...
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
         sba->client.knows_txdone        = true;
         sba->client.tx_tout             = 0;

-        /* Allocate mailbox channel array */
-        sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
-                                   sizeof(*sba->mchans), GFP_KERNEL);
-        if (!sba->mchans)
-                return -ENOMEM;
-
-        /* Request mailbox channels */
-        sba->mchans_count = 0;
-        for (i = 0; i < mchans_count; i++) {
-                sba->mchans[i] = mbox_request_channel(&sba->client, i);
-                if (IS_ERR(sba->mchans[i])) {
-                        ret = PTR_ERR(sba->mchans[i]);
-                        goto fail_free_mchans;
-                }
-                sba->mchans_count++;
+        /* Request mailbox channel */
+        sba->mchan = mbox_request_channel(&sba->client, 0);
+        if (IS_ERR(sba->mchan)) {
+                ret = PTR_ERR(sba->mchan);
+                goto fail_free_mchan;
         }
-        atomic_set(&sba->mchans_current, 0);

         /* Find-out underlying mailbox device */
         ret = of_parse_phandle_with_args(pdev->dev.of_node,
                                          "mboxes", "#mbox-cells", 0, &args);
         if (ret)
-                goto fail_free_mchans;
+                goto fail_free_mchan;
         mbox_pdev = of_find_device_by_node(args.np);
         of_node_put(args.np);
         if (!mbox_pdev) {
                 ret = -ENODEV;
-                goto fail_free_mchans;
+                goto fail_free_mchan;
         }
         sba->mbox_dev = &mbox_pdev->dev;

-        /* All mailbox channels should be of same ring manager device */
-        for (i = 1; i < mchans_count; i++) {
-                ret = of_parse_phandle_with_args(pdev->dev.of_node,
-                                                 "mboxes", "#mbox-cells", i, &args);
-                if (ret)
-                        goto fail_free_mchans;
-                mbox_pdev = of_find_device_by_node(args.np);
-                of_node_put(args.np);
-                if (sba->mbox_dev != &mbox_pdev->dev) {
-                        ret = -EINVAL;
-                        goto fail_free_mchans;
-                }
-        }
-
         /* Prealloc channel resource */
         ret = sba_prealloc_channel_resources(sba);
         if (ret)
-                goto fail_free_mchans;
+                goto fail_free_mchan;

         /* Check availability of debugfs */
         if (!debugfs_initialized())
...
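For readers less familiar with the mailbox client API used throughout this probe path: after the change, the flow reduces to "fill in a struct mbox_client, request channel index 0 from the 'mboxes' property, free it on failure". A condensed, hypothetical sketch under those assumptions (my_dev and my_probe are illustrative names, not driver symbols):

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

struct my_dev {
        struct mbox_client client;
        struct mbox_chan *mchan;
};

static int my_probe(struct platform_device *pdev, struct my_dev *d)
{
        d->client.dev          = &pdev->dev;
        d->client.tx_block     = false;
        d->client.knows_txdone = true;
        d->client.tx_tout      = 0;

        /* Request the first (and only) channel from the "mboxes" property */
        d->mchan = mbox_request_channel(&d->client, 0);
        if (IS_ERR(d->mchan))
                return PTR_ERR(d->mchan);

        /*
         * ... remaining setup would go here; on any later failure the
         * channel must be released with mbox_free_channel(d->mchan).
         */
        return 0;
}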
@@ -1777,24 +1737,22 @@ static int sba_probe(struct platform_device *pdev)
                 goto fail_free_resources;

         /* Print device info */
-        dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
-                 dma_chan_name(&sba->dma_chan), sba->ver+1,
-                 sba->mchans_count);
+        dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
+                 dma_chan_name(&sba->dma_chan), sba->ver+1,
+                 dev_name(sba->mbox_dev));

         return 0;

 fail_free_resources:
         debugfs_remove_recursive(sba->root);
         sba_freeup_channel_resources(sba);
-fail_free_mchans:
-        for (i = 0; i < sba->mchans_count; i++)
-                mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+        mbox_free_channel(sba->mchan);
         return ret;
 }

 static int sba_remove(struct platform_device *pdev)
 {
-        int i;
         struct sba_device *sba = platform_get_drvdata(pdev);

         dma_async_device_unregister(&sba->dma_dev);
...
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)

         sba_freeup_channel_resources(sba);

-        for (i = 0; i < sba->mchans_count; i++)
-                mbox_free_channel(sba->mchans[i]);
+        mbox_free_channel(sba->mchan);

         return 0;
 }
...