s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection, in sync with the fork source)
Commit b80fe826
Authored Dec 03, 2018 by Xin Pan
Parent: 93c16d96

Commit message:
    polish
    test=develop
Showing 10 changed files with 96 additions and 87 deletions (+96 -87)
paddle/fluid/framework/framework.proto                    +1   -1
paddle/fluid/imperative/tracer.h                          +31  -22
paddle/fluid/pybind/imperative.cc                         +6   -13
python/paddle/fluid/framework.py                          +9   -7
python/paddle/fluid/imperative/base.py                    +26  -3
python/paddle/fluid/imperative/layers.py                  +18  -37
python/paddle/fluid/layer_helper.py                       +2   -1
python/paddle/fluid/layers/nn.py                          +0   -1
python/paddle/fluid/tests/unittests/test_imperative.py    +2   -2
python/setup.py.in                                        +1   -0
paddle/fluid/framework/framework.proto

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 syntax = "proto2";
-// option optimize_for = LITE_RUNTIME;
+option optimize_for = LITE_RUNTIME;
 package paddle.framework.proto;
 
 // Any incompatible changes to ProgramDesc and its dependencies should
paddle/fluid/imperative/tracer.h

@@ -43,24 +43,31 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 class Tracer {
  public:
-  Tracer() {}
+  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+    root_scope_ = new framework::Scope();
+    scopes_[root_block_] = root_scope_;
+  }
+
+  virtual ~Tracer() { delete root_scope_; }
 
   void Trace(OpBase* op, const std::vector<VarBase*>& inputs,
-             const std::vector<VarBase*>& outputs) {
+             const std::vector<VarBase*>& outputs,
+             framework::BlockDesc* block) {
+    framework::Scope* scope = GetScope(block);
     framework::OpDesc* op_desc = op->op_desc_;
     LOG(ERROR) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block_);
-    op_desc->InferVarType(block_);
+    op_desc->InferShape(*block);
+    op_desc->InferVarType(block);
     std::unique_ptr<framework::OperatorBase> op_base =
         framework::OpRegistry::CreateOp(*op_desc);
 
     *op->input_vars_ = inputs;
     for (VarBase* input : inputs) {
       const std::string vname = input->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       input->var_ = var;
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -78,9 +85,9 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -91,28 +98,30 @@ class Tracer {
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
-    op_base->Run(*scope_, platform::CPUPlace());
+    op_base->Run(*scope, platform::CPUPlace());
 
     framework::OpDesc* grad_op_desc;
     auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block_}, &grad_op_desc, grad_to_var);
+    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
     op->grad_op_desc_ = grad_op_desc;
     op->grad_to_var_ = grad_to_var;
-    op->block_ = block_;
+    op->block_ = block;
   }
 
-  void SetScope(framework::Scope* scope) { scope_ = scope; }
-  void SetBlock(framework::BlockDesc* block) { block_ = block; }
-  framework::Scope* Scope() const { return scope_; }
-  framework::BlockDesc* Block() const { return block_; }
+  framework::Scope* GetScope(framework::BlockDesc* block) {
+    if (scopes_.find(block) != scopes_.end()) {
+      return scopes_.at(block);
+    }
+    framework::BlockDesc* parent_block = block->ParentBlock();
+    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
+    framework::Scope* scope = &scopes_[parent_block]->NewScope();
+    scopes_[block] = scope;
+    return scope;
+  }
 
  private:
-  framework::BlockDesc* block_;
-  framework::Scope* scope_;
-  std::vector<Runnable*> runnables_;
+  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
+  framework::BlockDesc* root_block_;
+  framework::Scope* root_scope_;
 };
 
 }  // namespace imperative
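The substance of this change: the tracer no longer carries a single scope_/block_ pair set from Python; it owns a root scope and lazily creates one scope per BlockDesc, where an unseen block gets a child of its parent block's scope. Below is a minimal, illustrative Python sketch of that lookup strategy only (a plain dict stands in for std::map and a toy Scope class for framework::Scope; none of these names are Paddle APIs):

# Illustrative sketch only (not Paddle code) of the caching GetScope performs:
# an unseen block is given a child scope of its parent block's scope, mirroring
# scopes_[block] = &scopes_[parent_block]->NewScope().
class Scope:
    def __init__(self, parent=None):
        self.parent = parent

    def new_child(self):
        return Scope(parent=self)


def get_scope(scopes, block, parent_of):
    """Return the scope cached for `block`, creating it under the parent's scope."""
    if block in scopes:
        return scopes[block]
    parent_block = parent_of(block)
    assert parent_block in scopes, "parent block must already have a scope"
    scopes[block] = scopes[parent_block].new_child()
    return scopes[block]


# Hypothetical block ids: 0 is the root block, 1 is a sub-block of 0.
scopes = {0: Scope()}
child = get_scope(scopes, 1, parent_of=lambda b: 0)
assert child.parent is scopes[0] and scopes[1] is child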
paddle/fluid/pybind/imperative.cc

@@ -23,20 +23,13 @@ namespace pybind {
 // Bind Methods
 void BindTracer(pybind11::module* m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
-      .def(pybind11::init<>())
+      .def("__init__",
+           [](imperative::Tracer& self, framework::BlockDesc* root_block) {
+             new (&self) imperative::Tracer(root_block);
+           })
       .def("trace", &imperative::Tracer::Trace)
-      .def_property("scope",
-                    [](const imperative::Tracer& self) { return self.Scope(); },
-                    [](imperative::Tracer& self, framework::Scope* scope) {
-                      self.SetScope(scope);
-                    },
-                    R"DOC()DOC")
-      .def_property("block",
-                    [](const imperative::Tracer& self) { return self.Block(); },
-                    [](imperative::Tracer& self, framework::BlockDesc* block) {
-                      self.SetBlock(block);
-                    },
-                    R"DOC()DOC");
+      .def("get_scope", &imperative::Tracer::GetScope,
+           pybind11::return_value_policy::reference);
 }
 
 }  // namespace pybind
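With this binding, Python constructs the tracer from a root block and asks it for per-block scopes, instead of assigning tracer.scope and tracer.block as before. A short usage sketch, assuming a paddle.fluid build that contains this commit (it mirrors what guard() and Block.append_op do in the Python changes below):

from paddle.fluid import core, framework

program = framework.Program()
root_block = program.current_block()

tracer = core.Tracer(root_block.desc)      # the old binding was a no-arg Tracer()
scope = tracer.get_scope(root_block.desc)  # per-block scope, returned by reference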
python/paddle/fluid/framework.py

@@ -358,11 +358,13 @@ class Variable(core.VarBase):
         self.stop_gradient = stop_gradient
         self.is_data = is_data
 
-    def numpy(self, scope):
+    def numpy(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         tensor = core.get_variable_tensor(scope, self.desc.name())
         return np.array(tensor)
 
-    def backward(self, scope):
+    def backward(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         self._run_backward(scope)
 
     def grad(self):
@@ -668,14 +670,14 @@ class Operator(core.OpBase):
             for inp in inputs.values():
                 if isinstance(inp, Variable):
                     input_vars.append(inp)
-                elif isinstance(inp, list):
+                elif isinstance(inp, list) or isinstance(inp, tuple):
                     input_vars.extend(inp[:])
             self.inputs = input_vars
 
             output_vars = []
             for out in outputs.values():
                 if isinstance(out, Variable):
                     output_vars.append(out)
-                elif isinstance(inp, list):
+                elif isinstance(out, list) or isinstance(out, tuple):
                     output_vars.extend(out[:])
             self.outputs = output_vars
@@ -1246,7 +1248,7 @@ class Block(object):
         if _in_imperative_mode():
             op_desc = core.OpDesc()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
-            _imperative_tracer().trace(op, op.inputs, op.outputs)
+            _imperative_tracer().trace(op, op.inputs, op.outputs, self.desc)
         else:
             op_desc = self.desc.append_op()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
@@ -2257,9 +2259,9 @@ def _get_var(name, program=None):
 @contextlib.contextmanager
-def _imperative_guard():
+def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = core.Tracer()
+    _imperative_tracer_ = tracer
 
     yield
 
     _imperative_tracer_ = tmp_trace
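Besides the tracer plumbing, two things change above: Variable.numpy() and Variable.backward() now resolve their scope through the tracer instead of taking it as an argument, and Operator accepts tuples as well as lists when flattening inputs and outputs (also fixing the inp/out typo in the output branch). A self-contained illustration of that flattening rule (an illustrative helper, not Paddle code):

def flatten_vars(values):
    # Mirrors the accepted types after this change: a bare value is appended,
    # a list or a tuple is spliced in element by element.
    flat = []
    for v in values:
        if isinstance(v, (list, tuple)):
            flat.extend(v[:])
        else:
            flat.append(v)
    return flat


assert flatten_vars([("a", "b"), "c"]) == ["a", "b", "c"]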
python/paddle/fluid/imperative/base.py

@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import contextlib
+import numpy as np
 
 from paddle.fluid import core
 from paddle.fluid import framework
 
-__all__ = ['enabled', 'guard']
+__all__ = ['enabled', 'guard', 'to_variable']
 
 
 def enabled():
@@ -26,8 +28,29 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
+    tracer = core.Tracer(train.current_block().desc)
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard():
+            with framework._imperative_guard(tracer):
                 yield
     # TODO: check train, startup not changed.
+
+
+def to_variable(value, block=None):
+    if isinstance(value, np.ndarray):
+        if not block:
+            block = framework.default_main_program().current_block()
+        py_var = framework.Variable(
+            block,
+            type=core.VarDesc.VarType.LOD_TENSOR,
+            name=None,
+            shape=value.shape,
+            dtype=value.dtype)
+        scope = framework._imperative_tracer().get_scope(block.desc)
+        var = scope.var(py_var.name)
+        tensor = var.get_tensor()
+        tensor.set(value, core.CPUPlace())
+        return py_var
+    elif isinstance(value, framework.Variable):
+        return value
+    else:
+        raise ValueError("Unsupported type %s" % type(value))
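to_variable is the new public entry point for feeding numpy data into imperative mode: it creates a Variable on the target block, looks up that block's scope from the tracer, and copies the array into the scope's tensor. A usage sketch, assuming a build that contains this commit (the array contents are illustrative):

import numpy as np
from paddle.fluid.imperative import base

with base.guard():
    x = base.to_variable(np.ones([2, 2], dtype=np.float32))
    print(x.numpy())  # reads the tensor back from the tracer-managed scope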
python/paddle/fluid/imperative/layers.py

@@ -18,51 +18,32 @@ import numpy as np
 from paddle.fluid import core
 from paddle.fluid import framework
+from paddle.fluid.imperative import base
 
 __all__ = ['PyLayer']
 
 
-@contextlib.contextmanager
-def trace_scope(scope, block):
-    tmp_scope = framework._imperative_tracer().scope
-    tmp_block = framework._imperative_tracer().block
-    framework._imperative_tracer().scope = scope
-    framework._imperative_tracer().block = block
-    yield
-    framework._imperative_tracer().scope = tmp_scope
-    framework._imperative_tracer().block = tmp_block
-
-
 class PyLayer(core.Layer):
     def __init__(self):
-        self._scope = core.Scope()
-        self._block = framework.default_main_program().current_block()
+        pass
 
     def __call__(self, inputs):
-        with trace_scope(self._scope, self._block.desc):
-            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-                inputs = [inputs]
-
-            var_inputs = []
-            for x in inputs:
-                if isinstance(x, np.ndarray):
-                    py_var = framework.Variable(
-                        self._block,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        name=None,
-                        shape=x.shape,
-                        dtype=x.dtype)
-                    var = self._scope.var(py_var.name)
-                    tensor = var.get_tensor()
-                    tensor.set(x, core.CPUPlace())
-                    var_inputs.append(py_var)
-                elif isinstance(x, framework.Variable):
-                    var_inputs.append(x)
-                else:
-                    raise ValueError("not var or ndarray %s" % type(x))
-            outputs = self.forward(var_inputs)
-            return outputs
+        # TODO(panyx0718): Support declarative mode as well.
+        assert base.enabled()
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+
+        var_inputs = []
+        for x in inputs:
+            if isinstance(x, np.ndarray):
+                py_var = base.to_variable(x)
+                var_inputs.append(py_var)
+            elif isinstance(x, framework.Variable):
+                var_inputs.append(x)
+            else:
+                raise ValueError("not var or ndarray %s" % type(x))
+        outputs = self.forward(var_inputs)
+        return outputs
 
     def forward(self, inputs):
         print("at python.")
         return []
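After this rewrite a PyLayer keeps no scope or block of its own; __call__ just asserts that imperative mode is enabled and routes numpy inputs through base.to_variable. A minimal subclass sketch, assuming a build that contains this commit (IdentityLayer is illustrative and distinct from the MyLayer used by the unit test below):

import numpy as np
from paddle.fluid.imperative import base
from paddle.fluid.imperative.layers import PyLayer


class IdentityLayer(PyLayer):
    def forward(self, inputs):
        # A real layer would emit framework ops here; this one simply returns
        # the already-converted Variables.
        return inputs


with base.guard():
    layer = IdentityLayer()
    outs = layer(np.array([1.0, 2.0, -1.0], dtype=np.float32))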
python/paddle/fluid/layer_helper.py

@@ -23,6 +23,7 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
+from paddle.fluid.imperative import base
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
 from six.moves import zip
@@ -62,7 +63,7 @@ class LayerHelper(object):
         if isinstance(x, Variable):
             return x
         elif isinstance(x, np.ndarray):
-            return self._np_to_variable(x)
+            return base.to_variable(x, self.main_program.current_block())
         else:
             raise ValueError("inputs wrong type %s\n" % x)
python/paddle/fluid/layers/nn.py

@@ -17,7 +17,6 @@ All layers just related to the neural network.
 from __future__ import print_function
 
-import sys
 import numpy as np
 import os
 from ..layer_helper import LayerHelper
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -43,8 +43,8 @@ class TestImperative(unittest.TestCase):
             l = MyLayer()
             x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
-            x.backward(l._scope)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy()))
+            x.backward()
             sys.stderr.write("grad %s\n" % l._x_for_debug.grad())
...
python/setup.py.in
浏览文件 @
b80fe826
...
...
@@ -101,6 +101,7 @@ packages=['paddle',
'paddle.dataset',
'paddle.reader',
'paddle.fluid',
'paddle.fluid.imperative',
'paddle.fluid.proto',
'paddle.fluid.proto.profiler',
'paddle.fluid.layers',
...
...