BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 788636f0: update by comments
Authored on Apr 18, 2018 by typhoonzero
Parent: e2d56832
Showing 3 changed files with 3 additions and 38 deletions (+3 -38)
paddle/fluid/framework/tensor.h         +0 -3
paddle/fluid/framework/tensor_impl.h    +0 -31
paddle/fluid/operators/split_byref_op.h +3 -4
paddle/fluid/framework/tensor.h

@@ -98,9 +98,6 @@ class Tensor {
   /*! The internal of two tensors share the same memory block. */
   inline Tensor& ShareDataWith(const Tensor& src);
 
-  /*! Share part of the memory of the two tensors */
-  inline Tensor& ShareDataWith(const Tensor* src, size_t offset);
-
   /**
    * @brief Return a sub-tensor of the given tensor.
    *
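The removed overload let one tensor alias the interior of another tensor's allocation at a raw byte offset. A close standard-library analogue is the aliasing constructor of std::shared_ptr: a handle that co-owns an allocation while pointing into its middle. A minimal standalone sketch of that idea (toy buffer and names, not Paddle code):

```cpp
// Share a sub-range of an existing buffer without copying: the view keeps
// the whole allocation alive while pointing at an interior element.
#include <cstdio>
#include <memory>
#include <vector>

int main() {
  // "Source tensor": 6 floats, conceptually 3 rows x 2 columns.
  auto buf = std::make_shared<std::vector<float>>(
      std::vector<float>{0, 1, 2, 3, 4, 5});

  // Aliasing constructor: row1 shares ownership of *buf but points at row 1.
  std::shared_ptr<float> row1(buf, buf->data() + 2);

  row1.get()[1] = 42.f;            // write through the view...
  std::printf("%g\n", (*buf)[3]);  // ...is visible in the source: prints 42
}
```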
paddle/fluid/framework/tensor_impl.h

@@ -162,37 +162,6 @@ inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
   return *this;
 }
 
-inline Tensor& Tensor::ShareDataWith(const Tensor* src, size_t offset) {
-  // NOTE: data size is determined by current tensor shape and data type
-  src->check_memory_size();
-  PADDLE_ENFORCE_EQ(src->type(), this->type(),
-                    "tensor data type must be the same when sharing data");
-  auto place = src->place();
-  auto type = src->type();
-  size_t size = src->numel() * SizeOfType(src->type());
-  auto* ref = src->data<uint8_t>() + offset;
-  if (platform::is_cpu_place(place)) {
-    holder_.reset(new SharedPlaceholderImpl<platform::CPUPlace>(
-        boost::get<platform::CPUPlace>(place), ref, size, type));
-  } else if (platform::is_gpu_place(place) ||
-             platform::is_cuda_pinned_place(place)) {
-#ifndef PADDLE_WITH_CUDA
-    PADDLE_THROW(
-        "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
-  }
-#else
-    if (platform::is_gpu_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPlace>(
-          boost::get<platform::CUDAPlace>(place), ref, size, type));
-    } else if (platform::is_cuda_pinned_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPinnedPlace>(
-          boost::get<platform::CUDAPinnedPlace>(place), ref, size, type));
-    }
-  }
-#endif
-  return *this;
-}
-
 inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
   check_memory_size();
   PADDLE_ENFORCE_GE(begin_idx, 0,
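What replaces this machinery is visible in the hunk's surviving tail: Tensor::Slice(begin_idx, end_idx) returns a sub-tensor over a row range that reuses the parent's existing holder_, so sharing needs no non-owning SharedPlaceholderImpl and no per-place branching. A self-contained model of that scheme (ToyTensor, its fields, and the 2-D shape handling are illustrative stand-ins, not Paddle's actual Tensor):

```cpp
// Toy model of slice-style sharing: a sub-tensor keeps the parent allocation
// alive through shared ownership and stores an element offset into it.
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct ToyTensor {
  std::shared_ptr<std::vector<float>> holder;  // shared allocation
  std::size_t offset;                          // element offset into holder
  std::size_t rows, cols;                      // 2-D shape

  float* data() const { return holder->data() + offset; }

  // View of rows [begin, end): same holder, shifted offset, smaller shape.
  ToyTensor Slice(std::size_t begin, std::size_t end) const {
    assert(begin < end && end <= rows);
    return ToyTensor{holder, offset + begin * cols, end - begin, cols};
  }
};

int main() {
  ToyTensor t{std::make_shared<std::vector<float>>(6, 0.f), 0, 3, 2};
  ToyTensor row1 = t.Slice(1, 2);  // aliases row 1, elements [2, 4)
  row1.data()[0] = 7.f;
  assert((*t.holder)[2] == 7.f);   // the write is visible in the parent
}
```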
paddle/fluid/operators/split_byref_op.h

@@ -26,15 +26,14 @@ class SplitByrefOpKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* in = ctx.Input<framework::Tensor>("X");
     auto outs = ctx.MultiOutput<framework::Tensor>("Out");
-    auto in_stride = framework::stride_numel(in->dims());
     auto place = ctx.GetPlace();
 
-    size_t input_offset = 0;
+    size_t row_offset = 0;
     for (size_t i = 0; i < outs.size(); ++i) {
       // NOTE: no need to call mutable_data here to allocate memory.
       auto* out = outs[i];
-      out->ShareDataWith(in, input_offset);
-      input_offset += out->numel() * framework::SizeOfType(out->type());
+      *out = std::move(in->Slice(row_offset, out->dims()[0]));
+      row_offset += out->dims()[0];
     }
   }
 };
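With this change each output becomes a row-range view of the input rather than a byte-offset share: output i starts at row_offset, which then advances by that output's row count, out->dims()[0]. The bookkeeping is a running prefix sum over per-output row counts, as in this standalone sketch (the section sizes are made-up numbers; in the real op they would come from the op's shape inference, an inference not shown in this diff):

```cpp
// Row bookkeeping of the new loop, with toy section sizes.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<std::size_t> section_rows = {2, 3, 5};  // rows per output

  std::size_t row_offset = 0;
  for (std::size_t i = 0; i < section_rows.size(); ++i) {
    // Output i views input rows [row_offset, row_offset + section_rows[i]).
    std::printf("out[%zu] <- rows [%zu, %zu)\n", i, row_offset,
                row_offset + section_rows[i]);
    row_offset += section_rows[i];
  }
  std::printf("rows consumed: %zu\n", row_offset);  // prints 10
}
```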