BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit b708ec0a
Authored May 07, 2018 by fengjiayi; committed via GitHub on May 07, 2018
Merge pull request #10412 from JiayiFeng/correct_TensorCopy_misuse
Correct tensor copy misuse
Parents: 76b63c25, 0c99cd7b
Showing 5 changed files with 25 additions and 30 deletions (+25 −30)
paddle/fluid/operators/lod_reset_op.h (+1 −2)
paddle/fluid/operators/math/concat_test.cc (+16 −16)
paddle/fluid/operators/math/sequence_padding_test.cc (+2 −2)
paddle/fluid/operators/multiplex_op.cu (+2 −2)
paddle/fluid/operators/sequence_slice_op.h (+4 −8)
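Each change below follows the same pattern: a host-side read of freshly copied data was paired with the asynchronous framework::TensorCopy, which only enqueues the copy on a device context and returns immediately, so the commit switches those call sites to the blocking framework::TensorCopySync. A minimal C++ sketch of the pattern, assuming the headers below and the TensorCopy / TensorCopySync signatures visible in the diffs; the ReadOnHost helper is hypothetical and not part of this commit:

```cpp
// A minimal sketch of the misuse and its fix, assuming the headers below and
// the TensorCopy / TensorCopySync signatures that appear in the diffs; the
// ReadOnHost helper is hypothetical and only for illustration.
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"

namespace f = paddle::framework;
namespace p = paddle::platform;

// Copies a GPU tensor (assumed to hold int data) to the CPU and reads it on
// the host right away.
void ReadOnHost(const f::Tensor& gpu_t, const p::DeviceContext& dev_ctx) {
  f::Tensor cpu_t;

  // Misuse: TensorCopy only enqueues the copy on dev_ctx's stream and returns
  // immediately, so a host-side read right after it could observe incomplete data.
  f::TensorCopy(gpu_t, p::CPUPlace(), dev_ctx, &cpu_t);
  // const int* racy = cpu_t.data<int>();  // may race with the pending copy

  // Fix applied by this commit: TensorCopySync blocks until the copy is done,
  // so reading the CPU buffer immediately afterwards is safe.
  f::TensorCopySync(gpu_t, p::CPUPlace(), &cpu_t);
  const int* ready = cpu_t.data<int>();  // data is guaranteed to be ready
  (void)ready;
}
```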
paddle/fluid/operators/lod_reset_op.h
@@ -46,8 +46,7 @@ class LoDResetKernel : public framework::OpKernel<T> {
     auto* lod = lod_t->data<int>();
     if (platform::is_gpu_place(ctx.GetPlace())) {
       framework::Tensor lod_cpu;
-      framework::TensorCopy(*lod_t, platform::CPUPlace(),
-                            ctx.device_context(), &lod_cpu);
+      framework::TensorCopySync(*lod_t, platform::CPUPlace(), &lod_cpu);
       lod = lod_cpu.data<int>();
     }
     level0 = std::vector<int>(lod, lod + lod_t->numel());
paddle/fluid/operators/math/concat_test.cc
@@ -69,8 +69,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    paddle::framework::TensorCopySync(input_a_cpu, Place(), &input_a);
+    paddle::framework::TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   std::vector<paddle::framework::Tensor> input;
@@ -86,8 +86,8 @@ void testConcat() {
   int* out_ptr;
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
-                                  &out_cpu);
+    paddle::framework::TensorCopySync(out, paddle::platform::CPUPlace(),
+                                      &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -142,8 +142,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    paddle::framework::TensorCopySync(input_a_cpu, Place(), &input_a);
+    paddle::framework::TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -157,8 +157,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
-                                  &out_cpu);
+    paddle::framework::TensorCopySync(out, paddle::platform::CPUPlace(),
+                                      &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -215,8 +215,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    paddle::framework::TensorCopySync(input_a_cpu, Place(), &input_a);
+    paddle::framework::TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -230,8 +230,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
-                                  &out_cpu);
+    paddle::framework::TensorCopySync(out, paddle::platform::CPUPlace(),
+                                      &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -290,8 +290,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    paddle::framework::TensorCopySync(input_a_cpu, Place(), &input_a);
+    paddle::framework::TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -305,8 +305,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
-                                  &out_cpu);
+    paddle::framework::TensorCopySync(out, paddle::platform::CPUPlace(),
+                                      &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
paddle/fluid/operators/math/sequence_padding_test.cc
@@ -41,7 +41,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod,
   if (paddle::platform::is_cpu_place(*place)) {
     seq = cpu_seq;
   } else {
-    TensorCopy(cpu_seq, *place, *context, &seq);
+    TensorCopySync(cpu_seq, *place, &seq);
     seq.set_lod(lod);
   }
@@ -64,7 +64,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod,
   if (paddle::platform::is_cpu_place(*place)) {
     cpu_seq_back = seq_back;
   } else {
-    TensorCopy(seq_back, paddle::platform::CPUPlace(), *context, &cpu_seq_back);
+    TensorCopySync(seq_back, paddle::platform::CPUPlace(), &cpu_seq_back);
     cpu_seq_back.set_lod(lod);
   }
paddle/fluid/operators/multiplex_op.cu
@@ -33,7 +33,7 @@ class MultiplexGPUKernel : public framework::OpKernel<T> {
     auto cols = ins[0]->numel() / rows;
     // copy index to cpu
     Tensor index_t_cpu;
-    TensorCopy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu);
+    TensorCopySync(*ids, platform::CPUPlace(), &index_t_cpu);
     auto* index = index_t_cpu.data<int32_t>();
     auto stream = ctx.cuda_device_context().stream();
     platform::CUDAPlace place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
@@ -69,7 +69,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel<T> {
     auto cols = ins[0]->numel() / rows;
     // copy index to cpu
     Tensor index_t_cpu;
-    TensorCopy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu);
+    TensorCopySync(*ids, platform::CPUPlace(), &index_t_cpu);
     auto* index = index_t_cpu.data<int32_t>();
     auto stream = ctx.cuda_device_context().stream();
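In these GPU kernels the index tensor is copied to the CPU and dereferenced on the host in the very next statement, so the copy has to be complete before the read. An alternative to switching to TensorCopySync would be to keep the asynchronous TensorCopy and explicitly block on the device context; a minimal sketch follows, assuming DeviceContext exposes a blocking Wait() method (an assumption, not shown in this diff) and using a hypothetical ReadIndexOnHost helper:

```cpp
// Sketch comparing the two ways to make the host-side read safe; the helper
// name ReadIndexOnHost is hypothetical, and DeviceContext::Wait() is assumed.
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"

const int32_t* ReadIndexOnHost(const paddle::framework::Tensor& ids,
                               const paddle::platform::DeviceContext& dev_ctx,
                               paddle::framework::Tensor* index_t_cpu) {
  (void)dev_ctx;  // only used by the commented-out alternative below

  // Option A (what this commit does): one blocking call, no explicit wait.
  paddle::framework::TensorCopySync(ids, paddle::platform::CPUPlace(),
                                    index_t_cpu);

  // Option B (alternative): enqueue the async copy, then block the host until
  // the device context's pending work, including the copy, has finished.
  // paddle::framework::TensorCopy(ids, paddle::platform::CPUPlace(), dev_ctx,
  //                               index_t_cpu);
  // dev_ctx.Wait();  // assumption: Wait() synchronizes the underlying stream

  return index_t_cpu->data<int32_t>();  // safe to read on the host now
}
```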
paddle/fluid/operators/sequence_slice_op.h
@@ -66,13 +66,11 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     if (platform::is_gpu_place(ctx.GetPlace())) {
       offset_cpu.mutable_data<T>(offset->dims(), platform::CPUPlace());
-      framework::TensorCopy(*offset, platform::CPUPlace(), ctx.device_context(),
-                            &offset_cpu);
+      framework::TensorCopySync(*offset, platform::CPUPlace(), &offset_cpu);
       offset_data = offset_cpu.data<int64_t>();

       length_cpu.mutable_data<T>(length->dims(), platform::CPUPlace());
-      framework::TensorCopy(*length, platform::CPUPlace(), ctx.device_context(),
-                            &length_cpu);
+      framework::TensorCopySync(*length, platform::CPUPlace(), &length_cpu);
       length_data = length_cpu.data<int64_t>();
     }
@@ -127,13 +125,11 @@ class SequenceSliceGradOpKernel : public framework::OpKernel<T> {
     if (platform::is_gpu_place(ctx.GetPlace())) {
       offset_cpu.mutable_data<T>(offset->dims(), platform::CPUPlace());
-      framework::TensorCopy(*offset, platform::CPUPlace(), ctx.device_context(),
-                            &offset_cpu);
+      framework::TensorCopySync(*offset, platform::CPUPlace(), &offset_cpu);
       offset_data = offset_cpu.data<int64_t>();

       length_cpu.mutable_data<T>(length->dims(), platform::CPUPlace());
-      framework::TensorCopy(*length, platform::CPUPlace(), ctx.device_context(),
-                            &length_cpu);
+      framework::TensorCopySync(*length, platform::CPUPlace(), &length_cpu);
       length_data = length_cpu.data<int64_t>();
     }