BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 74551758
Authored on Feb 21, 2019 by minqiyang

Polish code

test=develop

Parent: 1f0ef42e

Showing 5 changed files with 10 additions and 41 deletions (+10 -41)

paddle/fluid/imperative/layer.cc    +2  -2
paddle/fluid/imperative/layer.h     +6  -11
paddle/fluid/imperative/tracer.cc   +0  -21
paddle/fluid/pybind/pybind.cc       +1  -1
python/paddle/fluid/framework.py    +1  -6

paddle/fluid/imperative/layer.cc
@@ -175,7 +175,7 @@ std::unique_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,
   PADDLE_ENFORCE(var_->IsInitialized(),
                  "Variable must be initialized when getting numpy tensor");
-  std::unique_ptr<VarBase> new_var(new VarBase("NewVarBase"));
+  std::unique_ptr<VarBase> new_var(new VarBase());
   framework::LoDTensor* tensor =
       new_var->var_->GetMutable<framework::LoDTensor>();
   tensor->Resize(var_->Get<framework::LoDTensor>().dims());
@@ -303,7 +303,7 @@ std::vector<VarBase*> PyLayer::Apply(int func_id,
   std::vector<Variable*> outvars = CallPythonFunc(py_funcs_[func_id], invars);
   std::vector<VarBase*> ret;
   for (Variable* v : outvars) {
-    ret.push_back(new VarBase(v, new VarBase("PYLAYER_XGRAD", true), ""));
+    ret.push_back(new VarBase(v, new VarBase(true)));
   }
   return ret;
 }

paddle/fluid/imperative/layer.h
@@ -103,28 +103,24 @@ class OpBase;
  */
 class VarBase {
  public:
-  explicit VarBase(std::string name)
-      : VarBase(new framework::Variable(),
-                new VarBase(name + "XGRAD", true), name) {}
+  VarBase() : VarBase(new framework::Variable(), new VarBase(true)) {}

   // Owns `var` and `grad`
-  VarBase(framework::Variable* var, VarBase* grad, std::string name)
+  VarBase(framework::Variable* var, VarBase* grad)
       : var_desc_(nullptr),
         var_(var),
         grads_(grad),
         stop_gradient_(false),
         pre_op_(nullptr),
-        pre_op_out_idx_(-1),
-        name_(name) {}
+        pre_op_out_idx_(-1) {}

-  explicit VarBase(std::string name, bool stop_gradient)
+  explicit VarBase(bool stop_gradient)
       : var_desc_(nullptr),
         var_(new framework::Variable()),
-        grads_(stop_gradient ? nullptr : new VarBase(name + "XGRAD", true)),
+        grads_(stop_gradient ? nullptr : new VarBase(true)),
         stop_gradient_(stop_gradient),
         pre_op_(nullptr),
-        pre_op_out_idx_(-1),
-        name_(name) {}
+        pre_op_out_idx_(-1) {}

   virtual ~VarBase() {
     if (var_) {
@@ -187,7 +183,6 @@ class VarBase {
   OpBase* pre_op_;
   std::string pre_op_out_name_;
   int pre_op_out_idx_;
-  std::string name_;
 };

 /* The wrapper for OpDesc which holds a OpDesc and a OpDesc of its

paddle/fluid/imperative/tracer.cc
@@ -66,33 +66,12 @@ platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
   return result;
 }

-// framework::BlockDesc* InferShapeAndVarType(OpBase* op, const VarBasePtrMap&
-// inputs, const VarBasePtrMap& outputs) {
-//   std::unique_ptr<BlockDesc> block(new BlockDesc());
-//   // construct op desc
-//   op->op_desc_ = block.AppendOp();
-//   // construct op inputs and outputs
-//   // for
-//   //
-//   for (auto it = )
-//   op->op_desc_->SetInput()
-//   op->op_desc_->InferShape(*block);
-//   op->op_desc_->InferVarType(block.get());
-//   return block.release();
-// }
 void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                    const VarBasePtrMap& outputs, framework::BlockDesc* block,
                    const platform::Place expected_place,
                    const bool stop_gradient) {
   std::map<std::string, VarBase*> vars;

-  // framework::BlockDesc* block = InferShapeAndVarType(op, inputs, outputs);
   framework::OpDesc* op_desc = op->op_desc_;
   VLOG(3) << "tracer tracing " << op_desc->Type();
   op_desc->InferShape(*block);

paddle/fluid/pybind/pybind.cc
@@ -137,7 +137,7 @@ PYBIND11_MODULE(core, m) {
   py::class_<imperative::VarBase>(m, "VarBase", R"DOC()DOC")
       // .def(py::init<>())
-      .def(py::init<std::string, bool>(), py::arg("stop_gradient") = false, py::arg("name") = "")
+      .def(py::init<bool>(), py::arg("stop_gradient") = false)
       .def("_run_backward",
            [](imperative::VarBase &self) { self.RunBackward(); })
       .def("_grad_name", &imperative::VarBase::GradName)
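
Note (not part of the commit): a minimal sketch of how the rebound constructor is used from the Python side after this change. It assumes a Paddle build of this branch in which the compiled paddle.fluid.core module is importable; only core.VarBase and its stop_gradient argument come from the diff above, the surrounding lines are illustrative.

    # Hedged sketch, assuming paddle.fluid.core from this branch is importable.
    from paddle.fluid import core

    # The binding now exposes only py::init<bool>(), with stop_gradient
    # defaulting to False, so construction takes a single flag and no name.
    v = core.VarBase()                        # stop_gradient=False, gradient VarBase allocated
    leaf = core.VarBase(stop_gradient=True)   # stop_gradient=True, no gradient VarBase
    # The old overload core.VarBase(name, stop_gradient) no longer exists.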

python/paddle/fluid/framework.py
@@ -306,10 +306,6 @@ class Variable(object):
         if name is None:
             name = unique_name.generate('_generated_var')
-        # print("create var", name)
-        # import sys
-        # sys.stdout.flush()
         is_new_var = False
         name = cpt.to_text(name)
         self.desc = self.block.desc.find_var(cpt.to_bytes(name))
@@ -387,9 +383,8 @@ class Variable(object):
         if _in_imperative_mode():
             self._ivar = kwargs.get("ivar", None)
             if not self._ivar:
-                self._ivar = core.VarBase(name, stop_gradient)
+                self._ivar = core.VarBase(stop_gradient)
             self._ivar.desc = self.desc
-            self._ivar.stop_gradient = stop_gradient

     def _numpy(self):
         new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
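
Note (not part of the commit): the _copy_to context line above is the Python entry point into VarBase::NewVarBase from the first diff. A hedged sketch of that round trip follows; it assumes a Paddle build of this branch and an already-initialized imperative-mode Variable named var (an assumption, not shown in the diff), since NewVarBase enforces that the underlying tensor is initialized.

    # Hedged sketch; `var` is assumed to be a fluid.framework.Variable created
    # in imperative mode with an initialized tensor, so var._ivar is a core.VarBase.
    from paddle.fluid import core

    cpu_ivar = var._ivar._copy_to(core.CPUPlace(), True)  # backed by VarBase::NewVarBase
    np_value = var._numpy()                               # wraps the same copy, per the method above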