Commit bf412f46 (unverified)
Author: Zhou Wei, Oct 15, 2020
Committer: GitHub, Oct 15, 2020
Parent: 2e845182

add tensor clone (#27953)

* add tensor clone
* fix unittest test_var_base
3 changed files with 61 additions and 3 deletions (+61 -3):

  paddle/fluid/operators/assign_op.h                      +2  -2
  paddle/fluid/pybind/imperative.cc                       +48 -0
  python/paddle/fluid/tests/unittests/test_var_base.py    +11 -1
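For orientation: the commit binds a new clone() method on the imperative VarBase (paddle.Tensor in the 2.0 API). A minimal usage sketch, distilled from the docstring added in imperative.cc below; it assumes a Paddle build that already includes this commit:

    import paddle

    x = paddle.to_tensor(1.0, stop_gradient=False)
    clone_x = x.clone()   # data copy that stays in the current autograd graph
    y = clone_x ** 2
    y.backward()
    print(clone_x.grad)   # [2.0], the clone receives a gradient
    print(x.grad)         # [2.0], and it propagates back to the source tensor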
paddle/fluid/operators/assign_op.h (+2 -2)

@@ -54,7 +54,7 @@ class AssignFunctor {
     out_rows.set_height(rows.height());
     auto &t = rows.value();
     auto *m = out_rows.mutable_value();
-    framework::TensorCopy(t, dev_ctx_.GetPlace(), dev_ctx_, m);
+    framework::TensorCopy(t, t.place(), m);
   }

   template <typename T>
@@ -70,7 +70,7 @@ class AssignFunctor {
                   framework::LoDTensor *out) const {
     if (lod_tensor.numel() == 0) return;
     auto &out_tensor = *out;
-    TensorCopy(lod_tensor, dev_ctx_.GetPlace(), dev_ctx_, &out_tensor);
+    TensorCopy(lod_tensor, lod_tensor.place(), &out_tensor);
     out_tensor.set_lod(lod_tensor.lod());
   }
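With this change, TensorCopy targets the input tensor's own place instead of the device context's place, so an assign-based copy (including the new clone()) lands on the same device as its source. A minimal check sketch, assuming a CUDA build that includes this commit:

    import paddle

    x = paddle.to_tensor([1.0, 2.0], place=paddle.CUDAPlace(0))
    y = x.clone()    # the assign kernel copies to x's own place
    print(x.place)   # CUDAPlace(0)
    print(y.place)   # CUDAPlace(0), same device as the source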
paddle/fluid/pybind/imperative.cc (+48 -0)

@@ -718,6 +718,54 @@ void BindImperative(py::module *m_ptr) {
               loss.clear_gradient()
               print("After clear_gradient {}".format(loss.grad))
       )DOC")
+      .def("clone",
+           [](std::shared_ptr<imperative::VarBase> &self) {
+             const auto &tensor = self->Var().Get<framework::LoDTensor>();
+             PADDLE_ENFORCE_EQ(
+                 tensor.IsInitialized(), true,
+                 platform::errors::InvalidArgument(
+                     "%s has not been initialized", self->Name()));
+             auto tracer = imperative::GetCurrentTracer();
+             auto new_var = std::make_shared<imperative::VarBase>(
+                 true, tracer->GenerateUniqueName(self->Name() + "_clone"));
+             framework::AttributeMap attrs;
+             imperative::NameVarBaseMap ins = {{"X", {self}}};
+             imperative::NameVarBaseMap outs = {{"Out", {new_var}}};
+             tracer->TraceOp("assign", ins, outs, attrs);
+             return new_var;
+           },
+           py::return_value_policy::copy, R"DOC(
+
+        Returns a new Tensor, which is the clone of the original Tensor, and it remains in the current graph.
+        It will always have a Tensor copy.
+        In addition, the cloned Tensor provides gradient propagation.
+
+        Returns: The cloned Tensor.
+
+        Examples:
+            .. code-block:: python
+
+              import paddle
+
+              x = paddle.to_tensor(1.0, stop_gradient=False)
+              clone_x = x.clone()
+              y = clone_x**2
+              y.backward()
+              print(clone_x.stop_gradient) # False
+              print(clone_x.grad)          # [2.0], support gradient propagation
+              print(x.stop_gradient)       # False
+              print(x.grad)                # [2.0], clone_x support gradient propagation for x
+
+              x = paddle.to_tensor(1.0)
+              clone_x = x.clone()
+              clone_x.stop_gradient = False
+              z = clone_x**3
+              z.backward()
+              print(clone_x.stop_gradient) # False
+              print(clone_x.grad)          # [3.0], support gradient propagation
+              print(x.stop_gradient)       # True
+              print(x.grad)                # None
+       )DOC")
       .def("_run_backward",
            [](imperative::VarBase &self, const imperative::Tracer &tracer,
               bool retain_graph) {
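For contrast (not part of this diff): clone() keeps the copy inside the autograd graph by tracing an assign op, whereas the existing detach() binding returns a copy that is cut off from the graph. A small sketch of the difference:

    import paddle

    x = paddle.to_tensor(1.0, stop_gradient=False)

    c = x.clone()    # traced through assign; gradients flow back to x
    d = x.detach()   # no graph connection; d.stop_gradient is True

    (c * 2).backward()
    print(x.grad)            # [2.0], via the clone
    print(d.stop_gradient)   # True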
python/paddle/fluid/tests/unittests/test_var_base.py (+11 -1)

@@ -55,6 +55,15 @@ class TestVarBase(unittest.TestCase):
                 np.array_equal(x.numpy(), np.array([1.2]).astype('float32')))
             self.assertEqual(x.dtype, core.VarDesc.VarType.FP32)
+            clone_x = x.clone()
+            self.assertTrue(
+                np.array_equal(clone_x.numpy(),
+                               np.array([1.2]).astype('float32')))
+            self.assertEqual(clone_x.dtype, core.VarDesc.VarType.FP32)
+            y = clone_x**2
+            y.backward()
+            self.assertTrue(
+                np.array_equal(x.grad, np.array([2.4]).astype('float32')))

             # set_default_dtype take effect on complex
             x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False)

@@ -132,7 +141,7 @@ class TestVarBase(unittest.TestCase):
         _test_place(core.CPUPlace())
         if core.is_compiled_with_cuda():
-            _test_place(core.CUDAPinnedPlace())
+            # _test_place(core.CUDAPinnedPlace())
             _test_place(core.CUDAPlace(0))

     def test_to_variable(self):

@@ -405,6 +414,7 @@ class TestVarBase(unittest.TestCase):
             self.assertListEqual(list(var_base.shape), list(static_var.shape))

     def test_tensor_str(self):
+        paddle.enable_static()
         paddle.disable_static(paddle.CPUPlace())
         paddle.manual_seed(10)
         a = paddle.rand([10, 20])
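To exercise the new assertions locally, the updated test file can be run on its own; the invocations below assume it keeps the usual unittest.main() entry point and that you run from the repository root:

    python python/paddle/fluid/tests/unittests/test_var_base.py
    # or through pytest, which also collects unittest-style cases:
    python -m pytest python/paddle/fluid/tests/unittests/test_var_base.py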