Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit e5d64fd4
Authored Dec 02, 2018 by Xin Pan
Parent: 4d0df1fe

initial imperative

test=develop

Showing 9 changed files with 362 additions and 20 deletions (+362, -20)
paddle/fluid/imperative/CMakeLists.txt                    +1    -1
paddle/fluid/imperative/layer.cc                          +251  -1
paddle/fluid/imperative/layer.h                           +44   -4
paddle/fluid/imperative/tracer.h                          +33   -3
paddle/fluid/pybind/CMakeLists.txt                        +1    -1
paddle/fluid/pybind/imperative.h                          +5    -0
paddle/fluid/pybind/pybind.cc                             +12   -5
python/paddle/fluid/framework.py                          +12   -5
python/paddle/fluid/tests/unittests/test_imperative.py    +3    -0
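For orientation, here is a minimal sketch of how this first imperative cut is driven from Python, pieced together from the test_imperative.py and framework.py changes below. MyLayer and the `_scope` attribute come from the updated test; the surrounding setup (layer construction, any imperative-mode guard) is assumed rather than shown by this diff.

import numpy as np
import paddle.fluid as fluid

# Sketch only: MyLayer mirrors the layer in test_imperative.py; the exact
# construction/guard API around it is assumed, not taken from this commit.
class MyLayer(fluid.imperative.PyLayer):
    def forward(self, inputs):
        x = fluid.layers.relu(inputs[0])
        self._x_for_debug = x
        return [fluid.layers.elementwise_mul(x, x)]

l = MyLayer()
out = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
out.backward(l._scope)           # Variable.backward -> core.VarBase._run_backward
print(l._x_for_debug.grad())     # Variable.grad -> gradient as a numpy array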
paddle/fluid/imperative/CMakeLists.txt
-cc_library(layer SRCS layer.cc DEPS proto_desc)
+cc_library(layer SRCS layer.cc DEPS proto_desc operator)
 cc_library(tracer SRCS tracer.cc DEPS proto_desc)
 cc_library(engine SRCS engine.cc)
paddle/fluid/imperative/layer.cc
@@ -13,7 +13,257 @@
// limitations under the License.

#include "paddle/fluid/imperative/layer.h"

#include <deque>
#include <limits>
#include <map>
#include <random>
#include <utility>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/string/printf.h"

namespace paddle {
-namespace imperative {}  // namespace imperative
+namespace imperative {

using framework::Variable;

void AddTo(Variable* src, Variable* dst) {
  framework::LoDTensor* dst_tensor = dst->GetMutable<framework::LoDTensor>();
  framework::LoDTensor* src_tensor = src->GetMutable<framework::LoDTensor>();
  PADDLE_ENFORCE(dst_tensor->numel() == src_tensor->numel(), "%lld vs %lld",
                 dst_tensor->numel(), src_tensor->numel());
  float* dst_data = dst_tensor->mutable_data<float>(platform::CPUPlace());
  const float* src_data = src_tensor->data<float>();
  for (size_t i = 0; i < src_tensor->numel(); ++i) {
    dst_data[i] += src_data[i];
  }
}

class Autograd {
 public:
  explicit Autograd(framework::Scope* scope) : scope_(scope) {}

  void RunBackward(VarBase* var, framework::Variable* grad) {
    if (!var->pre_op_) {
      var->ApplyGrad(scope_, grad);
      return;
    }
    PADDLE_ENFORCE(var->pre_op_->op_desc_);
    // TODO(panyx0718): Only create vars that "require_grad"
    std::vector<Variable*> op_grads =
        CreateOpGrads(var->pre_op_->output_vars_->size());
    op_grads[var->pre_op_out_idx_] = grad;

    std::deque<std::pair<OpBase*, std::vector<Variable*>>> ready;
    ready.push_back(std::make_pair(var->pre_op_, op_grads));

    std::map<OpBase*, int> dep_counts = ComputeDepCounts(var->pre_op_);
    std::map<OpBase*, std::vector<Variable*>> visited;

    while (!ready.empty()) {
      OpBase* ready_op = ready.front().first;
      std::vector<Variable*> ready_op_grads = ready.front().second;
      ready.pop_front();
      std::vector<Variable*> input_grads = ready_op->ApplyGrad(scope_);

      for (size_t i = 0; i < input_grads.size(); ++i) {
        if (!input_grads[i]) continue;
        OpBase* pre_op = ready_op->pre_ops_->at(i);
        if (!pre_op) continue;

        dep_counts[pre_op] -= 1;
        PADDLE_ENFORCE(dep_counts[pre_op] >= 0);
        bool pre_op_ready = dep_counts[pre_op] == 0;

        if (pre_op_ready) {
          if (visited.find(pre_op) == visited.end()) {
            PADDLE_ENFORCE(pre_op->output_vars_->size() == 1);
            visited[pre_op] = {input_grads[i]};
          } else {
            std::vector<Variable*>& pre_op_grads = visited[pre_op];
            AccumGrads(pre_op_out_idx, input_grads[i], &pre_op_grads);
          }
          ready.push_back(std::make_pair(pre_op, visited[pre_op]));
        } else {
          if (visited.find(pre_op) == visited.end()) {
            // TODO(panyx0718): Only create vars that "require_grad"
            visited[pre_op] = CreateOpGrads(var->pre_op_->output_vars_->size());
          } else {
          }
          std::vector<Variable*>& pre_op_grads = visited[pre_op];
          AccumGrads(pre_op_out_idx, input_grads[i], &pre_op_grads);
        }
      }
    }
  }

 private:
  void AccumGrads(int grad_idx, Variable* grad,
                  std::vector<Variable*>* op_grads) {
    if (!(*op_grads)[grad_idx]) {
      // FIXME(panyx0718): This should be a deep copy.
      (*op_grads)[grad_idx] = grad;
      return;
    }
    AddTo(grad, (*op_grads)[grad_idx]);
  }

  std::map<OpBase*, int> ComputeDepCounts(OpBase* op) {
    std::map<OpBase*, int> ret;

    std::deque<OpBase*> queue;
    queue.push_back(op);
    std::unordered_set<OpBase*> visited;
    visited.insert(op);
    while (!queue.empty()) {
      OpBase* candidate = queue.front();
      queue.pop_front();
      for (OpBase* pre_op : *(candidate->pre_ops_)) {
        if (!pre_op) continue;
        if (visited.find(pre_op) == visited.end()) {
          visited.insert(pre_op);
          queue.push_back(pre_op);
        }
        ret[pre_op] += 1;
      }
    }
    return ret;
  }

  std::vector<Variable*> CreateOpGrads(size_t count) {
    std::vector<Variable*> op_grads;
    for (size_t i = 0; i < count; ++i) {
      op_grads.push_back(nullptr);
    }
    return op_grads;
  }

  framework::Scope* scope_;
};

framework::Variable* CreateVariable(const std::string& name,
                                    const framework::DDim& dim, float val,
                                    framework::Scope* scope,
                                    bool random_name = true) {
  std::string varname = name;
  if (random_name) {
    std::mt19937 rng;
    rng.seed(std::random_device()());
    std::uniform_int_distribution<std::mt19937::result_type> dist6(
        1, std::numeric_limits<int>::max());
    int id = dist6(rng);
    varname = string::Sprintf("%s@%d", varname, id);
  }
  LOG(ERROR) << "creating var " << varname;
  framework::Variable* var = scope->Var(varname);
  framework::LoDTensor* tensor = var->GetMutable<framework::LoDTensor>();

  float* data = tensor->mutable_data<float>(dim, platform::CPUPlace());
  std::fill(data, data + tensor->numel(), val);
  return var;
}

framework::LoDTensor& VarBase::Grad() {
  VLOG(3) << "get var grad " << var_desc_->Name();
  return *grads_->GetMutable<framework::LoDTensor>();
}

void VarBase::ApplyGrad(framework::Scope* scope, Variable* grad) {
  VLOG(3) << "apply var grad " << var_desc_->Name() << " "
          << grad->Get<framework::LoDTensor>().data<float>()[0];
  if (!grads_) {
    grads_ =
        CreateVariable(string::Sprintf("%s@IGrad", var_desc_->Name()),
                       var_->Get<framework::LoDTensor>().dims(), 0.0, scope);
  }
  AddTo(grad, grads_);
  VLOG(3) << "grad_ after apply var grad " << var_desc_->Name() << " "
          << grads_->Get<framework::LoDTensor>().data<float>()[0];
}

std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
  VLOG(3) << "op grad " << grad_op_desc_->Type();

  for (const std::string& invar : grad_op_desc_->InputArgumentNames()) {
    block_->FindRecursiveOrCreateVar(invar);
    framework::Variable* var = scope->Var(invar);
    LOG(ERROR) << "op grad in var " << invar;
    if (!var->IsInitialized()) {
      framework::VarDesc* var_desc = block_->FindVar(invar);
      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
        LOG(ERROR) << "grad op invar init " << invar;
        var->GetMutable<framework::LoDTensor>();
      } else {
        LOG(ERROR) << "tracer doesn't support yet";
      }
    } else {
      var->GetMutable<framework::LoDTensor>()->type();
    }
  }

  std::vector<Variable*> ret;
  for (size_t i = 0; i < input_vars_->size(); ++i) {
    ret.push_back(nullptr);
  }

  for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) {
    LOG(ERROR) << "grad outvar " << outvar;
    block_->FindRecursiveOrCreateVar(outvar);
    framework::Variable* var = scope->Var(outvar);
    if (!var->IsInitialized()) {
      framework::VarDesc* var_desc = block_->FindVar(outvar);
      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
        var->GetMutable<framework::LoDTensor>();
      } else {
        LOG(ERROR) << "tracer doesn't support yet";
      }
    }
  }

  grad_op_desc_->InferShape(*block_);
  grad_op_desc_->InferVarType(block_);
  std::unique_ptr<framework::OperatorBase> opbase =
      framework::OpRegistry::CreateOp(*grad_op_desc_);
  opbase->Run(*scope, platform::CPUPlace());

  for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) {
    if (grad_to_var_->find(outvar) != grad_to_var_->end()) {
      std::string origin_var = (*grad_to_var_)[outvar];
      for (size_t i = 0; i < input_vars_->size(); ++i) {
        VarBase* origin_in_var = (*input_vars_)[i];
        if (origin_in_var->var_desc_->Name() == origin_var) {
          framework::Variable* var = scope->FindVar(outvar);
          LOG(ERROR) << "apply grad " << outvar << " with origin "
                     << origin_var;
          // TODO(panyx0718): Accumulate.
          // origin_in_var->grads_ = var;
          origin_in_var->ApplyGrad(scope, var);
          ret[i] = var;
          // TODO(panyx0718): There might be 2 var with the same name. We
          // currently assume the are the same Variable*. So it doesn't matter
          // which one is used.
          break;
        }
      }
    }
  }
  return ret;
}

void VarBase::RunBackward(framework::Scope* scope) {
  // TODO(panyx0718): Might not be 0th, need to detect.
  grads_ = CreateVariable(pre_op_->grad_op_desc_->InputArgumentNames()[0],
                          var_->Get<framework::LoDTensor>().dims(), 1.0, scope,
                          false);
  framework::Variable* grad =
      CreateVariable("init@imperative_grad",
                     var_->Get<framework::LoDTensor>().dims(), 1.0, scope);

  Autograd(scope).RunBackward(this, grad);
}

}  // namespace imperative
}  // namespace paddle
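The Autograd class above schedules gradient ops by dependency counting: ComputeDepCounts walks the pre_ops_ graph breadth-first to count how many consumers each op has, and RunBackward pops "ready" ops off a queue, runs their grad op, accumulates the resulting gradients into each producer's output slots, and enqueues a producer once its count drops to zero. A simplified Python model of that scheduling follows; it is a sketch, not the C++ implementation, and the op attributes used here (pre_ops, pre_ops_out_idx, output_vars, apply_grad) are hypothetical stand-ins for the corresponding OpBase fields.

from collections import deque

def compute_dep_counts(root):
    # BFS over producer ops, counting how many consumers each op has
    # (rough analogue of Autograd::ComputeDepCounts).
    counts, visited, queue = {}, {root}, deque([root])
    while queue:
        op = queue.popleft()
        for pre in op.pre_ops:              # producer of each input, or None
            if pre is None:
                continue
            if pre not in visited:
                visited.add(pre)
                queue.append(pre)
            counts[pre] = counts.get(pre, 0) + 1
    return counts

def run_backward(root_op, root_out_grads):
    # Simplified model of Autograd::RunBackward: an op runs its grad op only
    # after every consumer of its outputs has contributed a gradient.
    dep_counts = compute_dep_counts(root_op)
    pending = {}                            # op -> per-output accumulated grads
    ready = deque([(root_op, root_out_grads)])
    while ready:
        op, out_grads = ready.popleft()
        in_grads = op.apply_grad(out_grads)  # analogue of OpBase::ApplyGrad
        for i, g in enumerate(in_grads):
            pre = op.pre_ops[i]
            if g is None or pre is None:
                continue
            slots = pending.setdefault(pre, [None] * len(pre.output_vars))
            idx = op.pre_ops_out_idx[i]
            slots[idx] = g if slots[idx] is None else slots[idx] + g
            dep_counts[pre] -= 1
            if dep_counts[pre] == 0:         # all consumers accounted for
                ready.append((pre, slots))
    return pending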
paddle/fluid/imperative/layer.h
@@ -14,8 +14,10 @@
#pragma once

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_desc.h"
#include "paddle/fluid/platform/enforce.h"
@@ -27,26 +29,64 @@ class OpBase;
class VarBase {
 public:
-  VarBase() {}
-  virtual ~VarBase() {}
+  VarBase()
+      : pre_op_(nullptr),
+        pre_op_out_idx_(-1),
+        var_desc_(nullptr),
+        var_(nullptr),
+        grads_(nullptr) {}
+
+  virtual ~VarBase() {
+    LOG(ERROR) << "deleting var";
+    LOG(ERROR) << "done deleting var";
+  }

  void ApplyGrad(framework::Scope* scope, framework::Variable* grad);

  void RunBackward(framework::Scope* scope);

  framework::LoDTensor& Grad();

  OpBase* pre_op_;
  int pre_op_out_idx_;

  framework::VarDesc* var_desc_;
  framework::Variable* var_;
  framework::Variable* grads_;
};

class OpBase {
 public:
  OpBase()
      : input_vars_(new std::vector<VarBase*>()),
-       output_vars_(new std::vector<VarBase*>()) {}
+       output_vars_(new std::vector<VarBase*>()),
+       pre_ops_(new std::vector<OpBase*>()),
+       pre_ops_out_idx_(new std::vector<int>()),
+       op_desc_(nullptr),
+       grad_op_desc_(nullptr) {}

  virtual ~OpBase() {
    delete input_vars_;
    delete output_vars_;
    delete pre_ops_;
    delete pre_ops_out_idx_;

    if (grad_op_desc_) delete grad_op_desc_;
    if (grad_to_var_) delete grad_to_var_;
  }

  std::vector<framework::Variable*> ApplyGrad(framework::Scope* scope);

  std::vector<VarBase*>* input_vars_;
  std::vector<VarBase*>* output_vars_;
  std::vector<OpBase*>* pre_ops_;
  std::vector<int>* pre_ops_out_idx_;
  framework::OpDesc* op_desc_;

  framework::OpDesc* grad_op_desc_;
  std::unordered_map<std::string, std::string>* grad_to_var_;
  framework::BlockDesc* block_;
};

class Layer {
@@ -58,7 +98,7 @@ class Layer {
    return vars;
  }

-  virtual void Backward() { LOG(ERROR) << "backward at cpp."; }
+  virtual void Backward() { LOG(ERROR) << "To support customize"; }
};

}  // namespace imperative
paddle/fluid/imperative/tracer.h
@@ -27,6 +27,20 @@
namespace paddle {
namespace imperative {

void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  framework::OpDesc** grad_op_desc,
                  std::unordered_map<std::string, std::string>* grad_to_var) {
  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
      framework::OpInfoMap::Instance()
          .Get(op_desc.Type())
          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
  PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
  // TODO(panyx0718): Leak?
  *grad_op_desc = grad_op_descs[0].release();
}

class Tracer {
 public:
  Tracer() {}
@@ -44,6 +58,7 @@ class Tracer {
    for (VarBase* input : inputs) {
      const std::string vname = input->var_desc_->Name();
      framework::Variable* var = scope_->Var(vname);
      input->var_ = var;
      if (!var->IsInitialized()) {
        framework::VarDesc* var_desc = block_->FindVar(vname);
        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
@@ -52,11 +67,17 @@ class Tracer {
          LOG(ERROR) << "tracer doesn't support yet";
        }
      }
      if (input->pre_op_) {
        op->pre_ops_->push_back(input->pre_op_);
        op->pre_ops_out_idx_->push_back(input->pre_op_out_idx_);
      } else {
        op->pre_ops_->push_back(nullptr);
      }
    }

    *op->output_vars_ = outputs;
-   for (auto output : outputs) {
-     const std::string vname = output->var_desc_->Name();
+   for (size_t i = 0; i < outputs.size(); ++i) {
+     const std::string vname = outputs[i]->var_desc_->Name();
      framework::Variable* var = scope_->Var(vname);
      if (!var->IsInitialized()) {
        framework::VarDesc* var_desc = block_->FindVar(vname);
@@ -66,9 +87,18 @@ class Tracer {
          LOG(ERROR) << "tracer doesn't support yet";
        }
      }
-     output->pre_op_ = op;
+     outputs[i]->var_ = var;
+     outputs[i]->pre_op_ = op;
+     outputs[i]->pre_op_out_idx_ = i;
    }

    op_base->Run(*scope_, platform::CPUPlace());

    framework::OpDesc* grad_op_desc;
    auto grad_to_var = new std::unordered_map<std::string, std::string>();
    CreateGradOp(*op_desc, {}, {block_}, &grad_op_desc, grad_to_var);
    op->grad_op_desc_ = grad_op_desc;
    op->grad_to_var_ = grad_to_var;
    op->block_ = block_;
  }

  void SetScope(framework::Scope* scope) { scope_ = scope; }
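Tracer::Trace, together with CreateGradOp above, records while the forward op runs which op produced each input (pre_ops_/pre_ops_out_idx_) and stamps itself as the producer of each output (pre_op_/pre_op_out_idx_), so the backward pass can later walk this chain. A rough Python model of that bookkeeping is sketched below; the Var and Op containers are hypothetical stand-ins for VarBase and OpBase, not this commit's API.

class Var:
    # Hypothetical stand-in for VarBase: remembers which op produced it.
    def __init__(self, name):
        self.name = name
        self.pre_op = None          # op that produced this var, if any
        self.pre_op_out_idx = -1    # which output slot of that op

class Op:
    # Hypothetical stand-in for OpBase: remembers producers of its inputs.
    def __init__(self, name):
        self.name = name
        self.pre_ops = []           # producer op (or None) per input
        self.pre_ops_out_idx = []   # output slot within each producer
        self.output_vars = []

def trace(op, inputs, outputs):
    # Mirror of the pointer bookkeeping in Tracer::Trace (simplified).
    for inp in inputs:
        if inp.pre_op is not None:
            op.pre_ops.append(inp.pre_op)
            op.pre_ops_out_idx.append(inp.pre_op_out_idx)
        else:
            op.pre_ops.append(None)
    op.output_vars = list(outputs)
    for i, out in enumerate(outputs):
        out.pre_op = op
        out.pre_op_out_idx = i
    # The real Trace also runs the forward op and attaches the grad OpDesc
    # built by CreateGradOp to the op; omitted here.

The commit keeps this bookkeeping on raw pointers owned by OpBase and VarBase, and parks the grad OpDesc on the op so the backward pass can materialize and run it later.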
paddle/fluid/pybind/CMakeLists.txt
-set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler)
+set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler layer)
 set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc imperative.cc)
 if(WITH_PYTHON)
paddle/fluid/pybind/imperative.h
@@ -42,6 +42,11 @@ class PyOpBase : public imperative::OpBase {
  using imperative::OpBase::OpBase;  // Inherit constructors
};

class PyVarBase : public imperative::VarBase {
 public:
  using imperative::VarBase::VarBase;  // Inherit constructors
};

void BindTracer(pybind11::module* m);

}  // namespace pybind
paddle/fluid/pybind/pybind.cc
@@ -34,6 +34,7 @@ limitations under the License. */
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
...
...
@@ -101,8 +102,13 @@ PYBIND11_MODULE(core, m) {
  BindException(&m);

- py::class_<imperative::VarBase>(m, "VarBase", R"DOC()DOC")
+ py::class_<imperative::VarBase, PyVarBase>(m, "VarBase", R"DOC()DOC")
      .def(py::init<>())
      .def("_run_backward",
           [](imperative::VarBase &self, framework::Scope *scope) {
             self.RunBackward(scope);
           })
      .def("_grad", &imperative::VarBase::Grad)
      .def_property("desc",
                    [](const imperative::VarBase &self) { return self.var_desc_; },
@@ -111,13 +117,14 @@ PYBIND11_MODULE(core, m) {
                    },
                    py::return_value_policy::reference);

  py::class_<imperative::OpBase, PyOpBase>(m, "OpBase", R"DOC()DOC")
      .def(py::init<>())
      .def_property("desc",
                    [](const imperative::OpBase &self) { return self.op_desc_; },
                    [](imperative::OpBase &self, framework::OpDesc *op_desc) {
-                     self.op_desc_ = op_desc;
+                     if (op_desc) {
+                       self.op_desc_ = op_desc;
+                     }
                    },
                    py::return_value_policy::reference);
python/paddle/fluid/framework.py
@@ -276,6 +276,7 @@ class Variable(core.VarBase):
                 stop_gradient=False,
                 is_data=False,
                 **kwargs):
        core.VarBase.__init__(self)
        self.block = block
        self.error_clip = error_clip
@@ -361,6 +362,12 @@ class Variable(core.VarBase):
        tensor = core.get_variable_tensor(scope, self.desc.name())
        return np.array(tensor)

    def backward(self, scope):
        self._run_backward(scope)

    def grad(self):
        return np.array(self._grad())

    def __str__(self):
        return self.to_string(True)
@@ -983,6 +990,7 @@ class Block(object):
        self.desc = program.desc.block(idx)
        self.vars = collections.OrderedDict()  # var_name --> var
        self.ops = list()  # operator list
        self._op_descs = list()
        self.program = program
        self.removed_vars = collections.OrderedDict()
@@ -1238,13 +1246,12 @@ class Block(object):
        if _in_imperative_mode():
            op_desc = core.OpDesc()
            op = Operator(block=self, desc=op_desc, *args, **kwargs)
            sys.stderr.write('%s %s!!!\n' % (type(op.inputs), type(op.outputs)))
            _imperative_tracer().trace(op, op.inputs, op.outputs)
            return
-       op_desc = self.desc.append_op()
-       op = Operator(block=self, desc=op_desc, *args, **kwargs)
+       else:
+           op_desc = self.desc.append_op()
+           op = Operator(block=self, desc=op_desc, *args, **kwargs)
        self.ops.append(op)
        self._op_descs.append(op_desc)
        return op

    def _insert_op(self, index, *args, **kwargs):
python/paddle/fluid/tests/unittests/test_imperative.py
@@ -26,6 +26,7 @@ class MyLayer(fluid.imperative.PyLayer):
    def forward(self, inputs):
        x = fluid.layers.relu(inputs[0])
        self._x_for_debug = x
        return [fluid.layers.elementwise_mul(x, x)]
@@ -43,6 +44,8 @@ class TestImperative(unittest.TestCase):
        x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
        self.assertIsNotNone(x)
        sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
        x.backward(l._scope)
        sys.stderr.write("grad %s\n" % l._x_for_debug.grad())


if __name__ == '__main__':