s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit b80fe826

Authored Dec 03, 2018 by Xin Pan

    polish

    test=develop

Parent: 93c16d96
Showing 10 changed files with 96 additions and 87 deletions (+96 / -87)
paddle/fluid/framework/framework.proto                     +1   -1
paddle/fluid/imperative/tracer.h                           +31  -22
paddle/fluid/pybind/imperative.cc                          +6   -13
python/paddle/fluid/framework.py                           +9   -7
python/paddle/fluid/imperative/base.py                     +26  -3
python/paddle/fluid/imperative/layers.py                   +18  -37
python/paddle/fluid/layer_helper.py                        +2   -1
python/paddle/fluid/layers/nn.py                           +0   -1
python/paddle/fluid/tests/unittests/test_imperative.py     +2   -2
python/setup.py.in                                         +1   -0
paddle/fluid/framework/framework.proto

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 syntax = "proto2";
-// option optimize_for = LITE_RUNTIME;
+option optimize_for = LITE_RUNTIME;
 package paddle.framework.proto;
 
 // Any incompatible changes to ProgramDesc and its dependencies should
paddle/fluid/imperative/tracer.h

@@ -43,24 +43,31 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 
 class Tracer {
  public:
-  Tracer() {}
+  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+    root_scope_ = new framework::Scope();
+    scopes_[root_block_] = root_scope_;
+  }
+
+  virtual ~Tracer() { delete root_scope_; }
 
   void Trace(OpBase* op, const std::vector<VarBase*>& inputs,
-             const std::vector<VarBase*>& outputs) {
+             const std::vector<VarBase*>& outputs,
+             framework::BlockDesc* block) {
+    framework::Scope* scope = GetScope(block);
     framework::OpDesc* op_desc = op->op_desc_;
     LOG(ERROR) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block_);
-    op_desc->InferVarType(block_);
+    op_desc->InferShape(*block);
+    op_desc->InferVarType(block);
     std::unique_ptr<framework::OperatorBase> op_base =
         framework::OpRegistry::CreateOp(*op_desc);
 
     *op->input_vars_ = inputs;
     for (VarBase* input : inputs) {
       const std::string vname = input->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       input->var_ = var;
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -78,9 +85,9 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -91,28 +98,30 @@ class Tracer {
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
-    op_base->Run(*scope_, platform::CPUPlace());
+    op_base->Run(*scope, platform::CPUPlace());
     framework::OpDesc* grad_op_desc;
     auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block_}, &grad_op_desc, grad_to_var);
+    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
     op->grad_op_desc_ = grad_op_desc;
     op->grad_to_var_ = grad_to_var;
-    op->block_ = block_;
+    op->block_ = block;
   }
 
-  void SetScope(framework::Scope* scope) { scope_ = scope; }
-  void SetBlock(framework::BlockDesc* block) { block_ = block; }
-  framework::Scope* Scope() const { return scope_; }
-  framework::BlockDesc* Block() const { return block_; }
+  framework::Scope* GetScope(framework::BlockDesc* block) {
+    if (scopes_.find(block) != scopes_.end()) {
+      return scopes_.at(block);
+    }
+    framework::BlockDesc* parent_block = block->ParentBlock();
+    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
+    framework::Scope* scope = &scopes_[parent_block]->NewScope();
+    scopes_[block] = scope;
+    return scope;
+  }
 
  private:
-  framework::BlockDesc* block_;
-  framework::Scope* scope_;
-  std::vector<Runnable*> runnables_;
+  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
+  framework::BlockDesc* root_block_;
+  framework::Scope* root_scope_;
 };
 
 }  // namespace imperative
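Note on the new scope bookkeeping: GetScope reuses the scope already associated with a block and otherwise derives a child scope from the parent block's scope, so a sub-block being traced can still resolve names from its enclosing blocks. A minimal Python sketch of that lookup (the Scope stub and the parent/new_scope members are illustrative stand-ins for framework::Scope and framework::BlockDesc, not real Paddle APIs):

    class Scope(object):
        """Stand-in for framework::Scope."""
        def new_scope(self):
            # Mirrors Scope::NewScope(): the child is owned by its parent.
            return Scope()

    class ScopeRegistry(object):
        """Sketch of the Tracer's scopes_ map, keyed by block."""
        def __init__(self, root_block):
            self._scopes = {root_block: Scope()}  # root scope, freed with the tracer

        def get_scope(self, block):
            if block in self._scopes:             # block traced before: reuse it
                return self._scopes[block]
            parent = block.parent                 # stand-in for BlockDesc::ParentBlock()
            assert parent in self._scopes         # mirrors the PADDLE_ENFORCE check
            scope = self._scopes[parent].new_scope()
            self._scopes[block] = scope
            return scope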
paddle/fluid/pybind/imperative.cc

@@ -23,20 +23,13 @@ namespace pybind {
 
 // Bind Methods
 void BindTracer(pybind11::module* m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
-      .def(pybind11::init<>())
+      .def("__init__",
+           [](imperative::Tracer& self, framework::BlockDesc* root_block) {
+             new (&self) imperative::Tracer(root_block);
+           })
       .def("trace", &imperative::Tracer::Trace)
-      .def_property("scope",
-                    [](const imperative::Tracer& self) { return self.Scope(); },
-                    [](imperative::Tracer& self, framework::Scope* scope) {
-                      self.SetScope(scope);
-                    },
-                    R"DOC()DOC")
-      .def_property("block",
-                    [](const imperative::Tracer& self) { return self.Block(); },
-                    [](imperative::Tracer& self, framework::BlockDesc* block) {
-                      self.SetBlock(block);
-                    },
-                    R"DOC()DOC");
+      .def("get_scope", &imperative::Tracer::GetScope,
+           pybind11::return_value_policy::reference);
 }
 
 }  // namespace pybind
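The placement-new lambda is the standard pybind11 idiom for a custom constructor, and return_value_policy::reference leaves ownership of the returned Scope* with the Tracer. A sketch of the resulting Python-facing surface, assuming a build with these bindings compiled in (this mirrors what guard() in base.py below now does):

    import paddle.fluid.core as core
    import paddle.fluid.framework as framework

    train = framework.Program()
    root_block = train.current_block().desc

    tracer = core.Tracer(root_block)      # constructed from a root BlockDesc
    scope = tracer.get_scope(root_block)  # C++ Scope*, still owned by the tracer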
python/paddle/fluid/framework.py

@@ -358,11 +358,13 @@ class Variable(core.VarBase):
         self.stop_gradient = stop_gradient
         self.is_data = is_data
 
-    def numpy(self, scope):
+    def numpy(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         tensor = core.get_variable_tensor(scope, self.desc.name())
         return np.array(tensor)
 
-    def backward(self, scope):
+    def backward(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         self._run_backward(scope)
 
     def grad(self):
@@ -668,14 +670,14 @@ class Operator(core.OpBase):
             for inp in inputs.values():
                 if isinstance(inp, Variable):
                     input_vars.append(inp)
-                elif isinstance(inp, list):
+                elif isinstance(inp, list) or isinstance(inp, tuple):
                     input_vars.extend(inp[:])
             self.inputs = input_vars
 
             output_vars = []
             for out in outputs.values():
                 if isinstance(out, Variable):
                     output_vars.append(out)
-                elif isinstance(inp, list):
+                elif isinstance(out, list) or isinstance(out, tuple):
                     output_vars.extend(out[:])
             self.outputs = output_vars
@@ -1246,7 +1248,7 @@ class Block(object):
         if _in_imperative_mode():
             op_desc = core.OpDesc()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
-            _imperative_tracer().trace(op, op.inputs, op.outputs)
+            _imperative_tracer().trace(op, op.inputs, op.outputs, self.desc)
         else:
             op_desc = self.desc.append_op()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
@@ -2257,9 +2259,9 @@ def _get_var(name, program=None):
 
 @contextlib.contextmanager
-def _imperative_guard():
+def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = core.Tracer()
+    _imperative_tracer_ = tracer
 
     yield
 
     _imperative_tracer_ = tmp_trace
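Two things change for callers here: numpy() and backward() no longer take a scope (it is resolved through the active tracer), and the Operator input/output normalization now accepts tuples as well as lists, which also fixes a copy-paste bug where the outputs branch tested inp instead of out. A call-site sketch, where x is an imperative-mode Variable as in test_imperative.py below:

    # Before: data = x.numpy(scope=l._scope); x.backward(l._scope)
    # After: the scope comes from _imperative_tracer().get_scope(x.block.desc)
    data = x.numpy()
    x.backward()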
python/paddle/fluid/imperative/base.py

@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import contextlib
+import numpy as np
 
 from paddle.fluid import core
 from paddle.fluid import framework
 
-__all__ = ['enabled', 'guard']
+__all__ = ['enabled', 'guard', 'to_variable']
 
 
 def enabled():
@@ -26,8 +28,29 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
+    tracer = core.Tracer(train.current_block().desc)
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard():
+            with framework._imperative_guard(tracer):
                 yield
-    # TODO: check train, startup not changed.
+
+
+def to_variable(value, block=None):
+    if isinstance(value, np.ndarray):
+        if not block:
+            block = framework.default_main_program().current_block()
+        py_var = framework.Variable(
+            block,
+            type=core.VarDesc.VarType.LOD_TENSOR,
+            name=None,
+            shape=value.shape,
+            dtype=value.dtype)
+        scope = framework._imperative_tracer().get_scope(block.desc)
+        var = scope.var(py_var.name)
+        tensor = var.get_tensor()
+        tensor.set(value, core.CPUPlace())
+        return py_var
+    elif isinstance(value, framework.Variable):
+        return value
+    else:
+        raise ValueError("Unsupported type %s" % type(value))
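A minimal usage sketch of the new helper, assuming paddle.fluid.imperative is importable (it is added to the packaged modules in setup.py.in below): inside guard(), to_variable places the numpy data in the tracer's scope for the current block and returns a Variable.

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative import base

    with fluid.imperative.guard():   # installs core.Tracer(root_block) as the tracer
        x = base.to_variable(np.ones([2, 2], dtype=np.float32))
        print(x.numpy())             # reads the tensor back from the tracer's scope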
python/paddle/fluid/imperative/layers.py

@@ -18,43 +18,25 @@ import numpy as np
 
 from paddle.fluid import core
 from paddle.fluid import framework
+from paddle.fluid.imperative import base
 
 __all__ = ['PyLayer']
 
 
-@contextlib.contextmanager
-def trace_scope(scope, block):
-    tmp_scope = framework._imperative_tracer().scope
-    tmp_block = framework._imperative_tracer().block
-    framework._imperative_tracer().scope = scope
-    framework._imperative_tracer().block = block
-    yield
-    framework._imperative_tracer().scope = tmp_scope
-    framework._imperative_tracer().block = tmp_block
-
-
 class PyLayer(core.Layer):
     def __init__(self):
-        self._scope = core.Scope()
-        self._block = framework.default_main_program().current_block()
+        pass
 
     def __call__(self, inputs):
-        with trace_scope(self._scope, self._block.desc):
-            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-                inputs = [inputs]
-
-            var_inputs = []
-            for x in inputs:
-                if isinstance(x, np.ndarray):
-                    py_var = framework.Variable(
-                        self._block,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        name=None,
-                        shape=x.shape,
-                        dtype=x.dtype)
-                    var = self._scope.var(py_var.name)
-                    tensor = var.get_tensor()
-                    tensor.set(x, core.CPUPlace())
-                    var_inputs.append(py_var)
-                elif isinstance(x, framework.Variable):
-                    var_inputs.append(x)
+        # TODO(panyx0718): Support declarative mode as well.
+        assert base.enabled()
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+
+        var_inputs = []
+        for x in inputs:
+            if isinstance(x, np.ndarray):
+                py_var = base.to_variable(x)
+                var_inputs.append(py_var)
+            elif isinstance(x, framework.Variable):
+                var_inputs.append(x)
@@ -64,5 +46,4 @@ class PyLayer(core.Layer):
         return outputs
 
     def forward(self, inputs):
-        print("at python.")
         return []
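With the per-layer scope/block plumbing removed, a user layer only overrides forward(); __call__ now converts numpy inputs through base.to_variable. A sketch modeled on the MyLayer exercised in test_imperative.py below (that layer's body is not part of this diff, so the ops used here are illustrative):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative.layers import PyLayer

    class MyLayer(PyLayer):
        def forward(self, inputs):
            x = fluid.layers.relu(inputs[0])
            return [fluid.layers.elementwise_mul(x, x)]

    with fluid.imperative.guard():
        l = MyLayer()
        x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]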
python/paddle/fluid/layer_helper.py

@@ -23,6 +23,7 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
+from paddle.fluid.imperative import base
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
 from six.moves import zip
@@ -62,7 +63,7 @@ class LayerHelper(object):
         if isinstance(x, Variable):
             return x
         elif isinstance(x, np.ndarray):
-            return self._np_to_variable(x)
+            return base.to_variable(x, self.main_program.current_block())
         else:
             raise ValueError("inputs wrong type %s\n" % x)
python/paddle/fluid/layers/nn.py

@@ -17,7 +17,6 @@ All layers just related to the neural network.
 
 from __future__ import print_function
 
-import sys
 import numpy as np
 import os
 from ..layer_helper import LayerHelper
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -43,8 +43,8 @@ class TestImperative(unittest.TestCase):
             l = MyLayer()
             x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
-            x.backward(l._scope)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy()))
+            x.backward()
             sys.stderr.write("grad %s\n" % l._x_for_debug.grad())
python/setup.py.in

@@ -101,6 +101,7 @@ packages=['paddle',
           'paddle.dataset',
           'paddle.reader',
           'paddle.fluid',
+          'paddle.fluid.imperative',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.layers',