openeuler / raspberrypi-kernel
Commit d1038084, authored Jul 14, 2017 by Al Viro
vmci: the same on the send side...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 53f58d8e

Showing 1 changed file with 20 additions and 69 deletions (+20, -69)
drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,20 +129,6 @@
  *   *_MEM state, and vice versa.
  */
 
-/*
- * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
- * types are passed around to enqueue and dequeue routines.  Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side.  In
- * ESX, that parameter holds a buffer type.
- */
-typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
-                                      u64 queue_offset, const void *src,
-                                      size_t src_offset, size_t size);
-
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
        struct mutex __mutex;   /* Protects the queue. */
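
The block removed above is the indirection this patch eliminates: enqueue paths used to receive their copy routine as a function pointer of this type. A purely hypothetical pre-patch caller (the name do_enqueue is illustrative, not from the file) would have been shaped roughly like this:

static ssize_t do_enqueue(struct vmci_queue *produce_q, u64 tail,
                          const void *buf, size_t len,
                          vmci_memcpy_to_queue_func memcpy_to_queue)
{
        /* the callback hides whether 'buf' is a linear buffer or a msghdr */
        return memcpy_to_queue(produce_q, tail, buf, 0, len);
}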
@@ -348,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
-static int __qp_memcpy_to_queue(struct vmci_queue *queue,
-                                u64 queue_offset,
-                                const void *src,
-                                size_t size,
-                                bool is_iovec)
+static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
+                                   u64 queue_offset,
+                                   struct iov_iter *from,
+                                   size_t size)
 {
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;
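
Replacing 'const void *src' plus the 'is_iovec' flag with a struct iov_iter works because the iterator both describes the data and remembers how much of it has been consumed. A minimal sketch of that property, with an illustrative helper not taken from the patch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uio.h>

/* Copy 'total' bytes out of an iov_iter in chunks of at most 'chunk'
 * bytes.  No source offset is computed by hand: the iterator advances
 * as copy_from_iter_full() consumes it. */
static int drain_iter(void *dst, size_t chunk, size_t total,
                      struct iov_iter *from)
{
        size_t done = 0;

        while (done < total) {
                size_t n = min(chunk, total - done);

                if (!copy_from_iter_full((u8 *)dst + done, n, from))
                        return -EFAULT;
                done += n;
        }
        return 0;
}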
@@ -377,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                else
                        to_copy = size - bytes_copied;
 
-               if (is_iovec) {
-                       struct msghdr *msg = (struct msghdr *)src;
-                       int err;
-
-                       /* The iovec will track bytes_copied internally. */
-                       err = memcpy_from_msg((u8 *)va + page_offset,
-                                             msg, to_copy);
-                       if (err != 0) {
-                               if (kernel_if->host)
-                                       kunmap(kernel_if->u.h.page[page_index]);
-                               return VMCI_ERROR_INVALID_ARGS;
-                       }
-               } else {
-                       memcpy((u8 *)va + page_offset,
-                              (u8 *)src + bytes_copied, to_copy);
+               if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
+                                        from)) {
+                       if (kernel_if->host)
+                               kunmap(kernel_if->u.h.page[page_index]);
+                       return VMCI_ERROR_INVALID_ARGS;
                }
-
                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
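
copy_from_iter_full() either copies all requested bytes or returns false, with the _full primitives treating a short copy as no copy at all, which is why the error path above only has to undo the temporary kmap() before bailing out. A self-contained sketch of the same shape (function and parameter names are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/uio.h>

static int copy_into_mapped_page(struct page *page, size_t page_offset,
                                 size_t len, struct iov_iter *from)
{
        void *va = kmap(page);
        int ret = 0;

        if (!copy_from_iter_full((u8 *)va + page_offset, len, from))
                ret = -EFAULT; /* plays the role of VMCI_ERROR_INVALID_ARGS */

        kunmap(page);
        return ret;
}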
@@ -554,30 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
        return VMCI_SUCCESS;
 }
 
-static int qp_memcpy_to_queue(struct vmci_queue *queue,
-                              u64 queue_offset,
-                              const void *src, size_t src_offset, size_t size)
-{
-       return __qp_memcpy_to_queue(queue, queue_offset,
-                                   (u8 *)src + src_offset, size, false);
-}
-
-/*
- * Copies from a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
-                                  u64 queue_offset,
-                                  const void *msg,
-                                  size_t src_offset, size_t size)
-{
-
-       /*
-        * We ignore src_offset because src is really a struct iovec * and will
-        * maintain offset internally.
-        */
-       return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
-}
-
 /*
  * Allocates kernel VA space of specified size plus space for the queue
  * and kernel interface. This is different from the guest queue allocator,
@@ -2590,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
                                 struct vmci_queue *consume_q,
                                 const u64 produce_q_size,
-                                const void *buf,
-                                size_t buf_size,
-                                vmci_memcpy_to_queue_func memcpy_to_queue)
+                                struct iov_iter *from)
 {
        s64 free_space;
        u64 tail;
+       size_t buf_size = iov_iter_count(from);
        size_t written;
        ssize_t result;
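
With the iterator as the only description of the source, the byte count no longer travels as a separate argument; qp_enqueue_locked() derives it locally via iov_iter_count(). A small sketch of the clamp the following hunk performs against the free space in the ring (the helper name is illustrative):

#include <linux/types.h>
#include <linux/uio.h>

static size_t writable_bytes(const struct iov_iter *from, s64 free_space)
{
        size_t buf_size = iov_iter_count(from);

        /* write what the caller supplied, capped by the ring's free space */
        return (size_t)(free_space > buf_size ? buf_size : free_space);
}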
@@ -2615,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
        written = (size_t) (free_space > buf_size ? buf_size : free_space);
        tail = vmci_q_header_producer_tail(produce_q->q_header);
        if (likely(tail + written < produce_q_size)) {
-               result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+               result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
        } else {
                /* Tail pointer wraps around. */
 
                const size_t tmp = (size_t) (produce_q_size - tail);
 
-               result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+               result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
                if (result >= VMCI_SUCCESS)
-                       result = memcpy_to_queue(produce_q, 0, buf, tmp,
-                                                written - tmp);
+                       result = qp_memcpy_to_queue_iter(produce_q, 0, from,
+                                                        written - tmp);
        }
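
When the tail pointer wraps, the write is split in two: 'tmp' bytes up to the end of the ring, then 'written - tmp' bytes at offset 0. Because the iov_iter keeps its own position, the second call simply continues where the first one stopped, whereas the old code had to pass an explicit source offset of 'tmp'. The split itself is just this arithmetic (illustrative helper, not in the patch):

#include <linux/types.h>

/* Size of the first segment of a 'written'-byte enqueue into a ring of
 * 'q_size' bytes starting at 'tail'; the remainder lands at offset 0. */
static size_t first_segment(u64 q_size, u64 tail, size_t written)
{
        if (tail + written < q_size)
                return written;                 /* no wrap */
        return (size_t)(q_size - tail);
}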
@@ -3078,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
                           int buf_type)
 {
        ssize_t result;
+       struct iov_iter from;
+       struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
 
        if (!qpair || !buf)
                return VMCI_ERROR_INVALID_ARGS;
 
+       iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+
        qp_lock(qpair);
 
        do {
                result = qp_enqueue_locked(qpair->produce_q,
                                           qpair->consume_q,
                                           qpair->produce_q_size,
-                                          buf, buf_size,
-                                          qp_memcpy_to_queue);
+                                          &from);
 
                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                    !qp_wait_for_ready_queue(qpair))
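
For the plain-buffer API, the caller-supplied buffer is wrapped in a single-segment ITER_KVEC iterator before taking the queue-pair lock; the 'WRITE | ITER_KVEC' direction argument follows the iov_iter_kvec() convention of this kernel tree. A sketch of that wrapping in isolation (the helper name is illustrative):

#include <linux/uio.h>

static void make_kvec_iter(struct iov_iter *from, struct kvec *v,
                           void *buf, size_t len)
{
        v->iov_base = buf;
        v->iov_len = len;
        /* one kernel-space segment covering the whole buffer */
        iov_iter_kvec(from, WRITE | ITER_KVEC, v, 1, len);
}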
@@ -3219,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                result = qp_enqueue_locked(qpair->produce_q,
                                           qpair->consume_q,
                                           qpair->produce_q_size,
-                                          msg, msg_data_left(msg),
-                                          qp_memcpy_to_queue_iov);
+                                          &msg->msg_iter);
 
                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                    !qp_wait_for_ready_queue(qpair))
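
On the msghdr-based path no wrapping is needed at all: the iterator already lives in msg->msg_iter, and msg_data_left() is just its remaining byte count, so qp_enqueue_locked() no longer needs the iovec-specific copy callback or an explicit length. A one-line sketch of that equivalence (the helper name is illustrative):

#include <linux/socket.h>
#include <linux/uio.h>

static size_t bytes_to_enqueue(const struct msghdr *msg)
{
        /* same value as msg_data_left(msg) */
        return iov_iter_count(&msg->msg_iter);
}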