Commit 008ab086
Authored Sep 29, 2018 by tangwei12

Merge branch 'release/1.0.0' of github.com:PaddlePaddle/Paddle into release/1.0.0

Parents: da4129f2, 8d16de73
Showing 21 changed files with 437 additions and 217 deletions (+437, -217)
paddle/contrib/float16/float16_transpiler.py                 +3   -3
paddle/fluid/API.spec                                        +7   -24
paddle/fluid/framework/ir/graph_traits.cc                    +2   -0
paddle/fluid/operators/sampling_id_op.cc                     +8   -7
paddle/fluid/operators/sequence_slice_op.h                   +2   -2
paddle/fluid/pybind/protobuf.cc                              +5   -5
paddle/scripts/paddle_build.sh                               +4   -4
python/paddle/fluid/backward.py                              +12  -12
python/paddle/fluid/clip.py                                  +2   -2
python/paddle/fluid/framework.py                             +20  -22
python/paddle/fluid/layers/detection.py                      +3   -3
python/paddle/fluid/layers/nn.py                             +263 -104
python/paddle/fluid/layers/ops.py                            +0   -7
python/paddle/fluid/tests/unittests/dist_transformer.py      +1   -1
python/paddle/fluid/tests/unittests/test_dist_transpiler.py  +19  -0
python/paddle/fluid/tests/unittests/test_infer_shape.py      +2   -2
python/paddle/fluid/tests/unittests/test_layers.py           +60  -1
python/paddle/fluid/tests/unittests/test_protobuf_descs.py   +10  -10
python/paddle/fluid/transpiler/details/program_utils.py      +2   -2
python/paddle/fluid/transpiler/distribute_transpiler.py      +8   -2
python/paddle/fluid/transpiler/inference_transpiler.py       +4   -4
paddle/contrib/float16/float16_transpiler.py

@@ -102,8 +102,8 @@ class Float16Transpiler:
                 continue
                 for input_arg in current_op.input_arg_names:
                     if input_arg in self.input_map:
-                        current_op.rename_input(input_arg,
-                                                self.input_map[input_arg])
+                        current_op._rename_input(input_arg,
+                                                 self.input_map[input_arg])

     def _remove_unused_var(self):
         '''
@@ -187,7 +187,7 @@ class Float16Transpiler:
                             shape=var.shape,
                             persistable=var.persistable)
             find_op(var)
-            var.op.rename_output(var_name, tmp_var_name)
+            var.op._rename_output(var_name, tmp_var_name)
             self.block._insert_op(
                 i,
                 type="cast",
paddle/fluid/API.spec

@@ -6,26 +6,9 @@ paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=
 paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
-paddle.fluid.Operator.__init__ ArgSpec(args=['self', 'block', 'desc', 'type', 'inputs', 'outputs', 'attrs'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.attr_type ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.block_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.block_attr_id ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.blocks_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.blocks_attr_ids ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.has_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.has_kernel ArgSpec(args=['self', 'op_type'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.input ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.output ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.rename_input ArgSpec(args=['self', 'old_name', 'new_name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.rename_output ArgSpec(args=['self', 'old_name', 'new_name'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.set_attr ArgSpec(args=['self', 'name', 'val'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Operator.to_string ArgSpec(args=['self', 'throw_on_error'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
-paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -170,6 +153,13 @@ paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_
 paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
 paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
 paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
+paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
+paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
+paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32'))
+paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
+paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -241,13 +231,6 @@ paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwarg
 paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.gaussian_random ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sampling_id ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.shape ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
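The first hunk drops the Operator methods from the public 1.0 surface; the later hunks replace the auto-generated **kwargs signatures of the seven moved layers with real ArgSpecs. As a reading aid, here is how one such ArgSpec line maps back to a Python signature. This is a sketch of the format only; the ArgSpec line itself is taken verbatim from the hunk above.

# paddle.fluid.layers.gaussian_random ArgSpec(
#     args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'],
#     varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
#
# defaults bind right-to-left against args, so the line above describes:
def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32',
                    use_mkldnn=False):
    ...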
paddle/fluid/framework/ir/graph_traits.cc

@@ -14,6 +14,8 @@
 #include "paddle/fluid/framework/ir/graph_traits.h"

+#include <vector>
+
 namespace paddle {
 namespace framework {
 namespace ir {
paddle/fluid/operators/sampling_id_op.cc

@@ -53,15 +53,16 @@ class SamplingIdOpMaker : public framework::OpProtoAndCheckerMaker {
 SamplingId Operator.
 A layer for sampling id from multinomial distribution from the
 input. Sampling one id for one sample.)DOC");
-    AddAttr<float>("min", "Minimum value of random. [default 0.0].")
+    AddAttr<float>("min", "Minimum value of random. (float, default 0.0).")
         .SetDefault(0.0f);
-    AddAttr<float>("max", "Maximun value of random. [default 1.0].")
+    AddAttr<float>("max", "Maximun value of random. (float, default 1.0).")
         .SetDefault(1.0f);
-    AddAttr<int>("seed",
-                 "Random seed used for the random number engine. "
-                 "0 means use a seed generated by the system."
-                 "Note that if seed is not 0, this operator will always "
-                 "generate the same random numbers every time. [default 0].")
+    AddAttr<int>(
+        "seed",
+        "Random seed used for the random number engine. "
+        "0 means use a seed generated by the system."
+        "Note that if seed is not 0, this operator will always "
+        "generate the same random numbers every time. (int, default 0).")
         .SetDefault(0);
   }
 };
paddle/fluid/operators/sequence_slice_op.h

@@ -75,11 +75,11 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     }

     for (size_t i = 0; i < n; ++i) {
-      PADDLE_ENFORCE_LT(0, offset_data[i],
-                        "The offset[%d] must greater than zero.", i);
+      PADDLE_ENFORCE_LE(0, offset_data[i],
+                        "The offset[%d] must greater than zero.", i);
       PADDLE_ENFORCE_LT(0, length_data[i],
                         "The length[%d] must greater than zero.", i);
-      PADDLE_ENFORCE_LT(lod[0][i] + offset_data[i] + length_data[i],
-                        lod[0][i + 1], "The target tensor's length overflow.");
+      PADDLE_ENFORCE_LE(lod[0][i] + offset_data[i] + length_data[i],
+                        lod[0][i + 1], "The target tensor's length overflow.");
     }
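The switch from PADDLE_ENFORCE_LT to PADDLE_ENFORCE_LE relaxes two bounds: an offset of exactly zero becomes legal, and a slice may now end exactly at the sequence boundary. A minimal Python sketch of the new invariants; check_slice is a hypothetical helper for illustration, not part of Paddle:

def check_slice(lod_starts, offset, length, i):
    # lod_starts are the cumulative sequence offsets, i.e. lod[0] in the kernel.
    assert 0 <= offset[i]       # was 0 < offset[i] before this change
    assert 0 < length[i]        # unchanged: length must stay positive
    end = lod_starts[i] + offset[i] + length[i]
    assert end <= lod_starts[i + 1]   # was strict <, which rejected full-length slices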
paddle/fluid/pybind/protobuf.cc

@@ -285,12 +285,12 @@ void BindOpDesc(pybind11::module *m) {
       .def("set_output", &pd::OpDesc::SetOutput)
       .def("input_arg_names", &pd::OpDesc::InputArgumentNames)
       .def("output_arg_names", &pd::OpDesc::OutputArgumentNames)
-      .def("rename_input", &pd::OpDesc::RenameInput)
-      .def("rename_output", &pd::OpDesc::RenameOutput)
+      .def("_rename_input", &pd::OpDesc::RenameInput)
+      .def("_rename_output", &pd::OpDesc::RenameOutput)
       .def("has_attr", &pd::OpDesc::HasAttr)
       .def("attr_type", &pd::OpDesc::GetAttrType)
       .def("attr_names", &pd::OpDesc::AttrNames)
-      .def("set_attr", &pd::OpDesc::SetAttr)
+      .def("_set_attr", &pd::OpDesc::SetAttr)
       .def("attr", &pd::OpDesc::GetAttr)
       .def("set_block_attr", &pd::OpDesc::SetBlockAttr)
       .def("set_blocks_attr", &pd::OpDesc::SetBlocksAttr)
@@ -300,8 +300,8 @@ void BindOpDesc(pybind11::module *m) {
             std::string ser(seriralized);
             self.SetAttr(name, ser);
           })
-      .def("block_attr_id", &pd::OpDesc::GetBlockAttrId)
-      .def("blocks_attr_ids", &pd::OpDesc::GetBlocksAttrIds)
+      .def("_block_attr_id", &pd::OpDesc::GetBlockAttrId)
+      .def("_blocks_attr_ids", &pd::OpDesc::GetBlocksAttrIds)
       .def("check_attrs", &pd::OpDesc::CheckAttrs)
       .def("infer_shape", &pd::OpDesc::InferShape)
       .def("infer_var_type", &pd::OpDesc::InferVarType)
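These pybind renames are what surface the underscore-prefixed OpDesc methods used throughout the Python changes below. A small sketch of the resulting Python-side surface, assuming a build that includes this commit; the calls mirror test_protobuf_descs.py later in this diff:

import paddle.fluid.core as core

prog = core.ProgramDesc()
block = prog.block(0)
op = block.append_op()
op.set_type("mul")
op._set_attr("x_num_col_dims", 1)      # was op.set_attr(...) before this commit
print(op.attr("x_num_col_dims"))       # -> 1
print(op.has_attr("x_num_col_dims"))   # -> True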
paddle/scripts/paddle_build.sh

@@ -629,10 +629,10 @@ EOF
 function gen_capi_package() {
   if [[ ${WITH_C_API} == "ON" ]]; then
-    install_prefix="${PADDLE_ROOT}/build/capi_output"
-    rm -rf $install_prefix
-    make DESTDIR="$install_prefix" install
-    cd $install_prefix/usr/local
+    capi_install_prefix=${INSTALL_PREFIX:-/paddle/build}/capi_output
+    rm -rf $capi_install_prefix
+    make DESTDIR="$capi_install_prefix" install
+    cd $capi_install_prefix/
     ls | egrep -v "^Found.*item$" | xargs tar -czf ${PADDLE_ROOT}/build/paddle.tgz
   fi
 }
python/paddle/fluid/backward.py

@@ -38,8 +38,8 @@ def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
         op_desc = op_descs[i]
         if isinstance(op_desc, tuple):
             op_desc = op_desc[0]
-        op_desc.rename_input(old_name, new_name)
-        op_desc.rename_output(old_name, new_name)
+        op_desc._rename_input(old_name, new_name)
+        op_desc._rename_output(old_name, new_name)


 def _create_op_desc_(op_type, inputs, outputs, attrs):
@@ -70,7 +70,7 @@ def _create_op_desc_(op_type, inputs, outputs, attrs):
         if isinstance(val, framework.Block):
             op_desc.set_block_attr(name, val.desc)
         else:
-            op_desc.set_attr(name, val)
+            op_desc._set_attr(name, val)
     return op_desc
@@ -346,7 +346,7 @@ def _append_backward_ops_(block,
         grad_sub_block_list = []
         # If the op has its own sub-block, deal with the sub-block first
         if op.has_attr("sub_block"):
-            sub_block = program.block(op.block_attr_id("sub_block"))
+            sub_block = program.block(op._block_attr_id("sub_block"))
             grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
             cb = _callback_lookup_(op)
@@ -382,7 +382,7 @@ def _append_backward_ops_(block,
     for op_desc in grad_op_descs:
         new_op_desc = target_block.desc.append_op()
         new_op_desc.copy_from(op_desc)
-        new_op_desc.set_attr(op_role_attr_name, backward)
+        new_op_desc._set_attr(op_role_attr_name, backward)
         grad_to_var["__current_op_desc__"] = new_op_desc
         if callbacks is not None:
             assert (isinstance(callbacks, list))
@@ -408,7 +408,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
     for op_idx in range(start_op_idx, block.desc.op_size()):
         op_desc = block.desc.op(op_idx)
         if op_desc.has_attr("sub_block"):
-            sub_block = block.program.block(op_desc.block_attr_id("sub_block"))
+            sub_block = block.program.block(op_desc._block_attr_id("sub_block"))
             _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)
         new_vars = set()
         # create new gradient variables
@@ -438,12 +438,12 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
         op_desc = block.desc.op(op_idx)
         for name in op_desc.input_arg_names():
             if name in var_map:
-                op_desc.rename_input(name, var_map[name])
+                op_desc._rename_input(name, var_map[name])

         for name in op_desc.output_arg_names():
             if block.desc.find_var(name.encode("ascii")):
                 new_name = unique_name.generate(name)
-                op_desc.rename_output(name, new_name)
+                op_desc._rename_output(name, new_name)
                 var_map[name] = new_name

     for g, ng in six.iteritems(var_map):
@@ -542,9 +542,9 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     if loss.op is None:
         raise ValueError("loss.op is None. Should not happend")
-    loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
-                     int(core.op_proto_and_checker_maker.OpRole.Forward) |
-                     int(core.op_proto_and_checker_maker.OpRole.Loss))
+    loss.op._set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
+                      int(core.op_proto_and_checker_maker.OpRole.Forward) |
+                      int(core.op_proto_and_checker_maker.OpRole.Loss))

     if callbacks is not None:
         isinstance(callbacks, list)
@@ -631,7 +631,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
             attr_val = [p.name, g.name]
             if g.op.has_attr(op_role_var_attr_name):
                 attr_val.extend(g.op.attr(op_role_var_attr_name))
-            g.op.set_attr(op_role_var_attr_name, attr_val)
+            g.op._set_attr(op_role_var_attr_name, attr_val)

     return params_and_grads
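The renames above are internal; the public entry point append_backward is untouched. A minimal hedged usage sketch under fluid 1.0; the network names are illustrative only:

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

# Drives the now-private _rename_input/_rename_output/_set_attr helpers internally.
params_grads = fluid.backward.append_backward(loss)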
python/paddle/fluid/clip.py

@@ -75,8 +75,8 @@ class ErrorClipByValue(BaseErrorClipAttr):
         clip_op_desc.set_type("clip")
         clip_op_desc.set_input("X", [grad_name])
         clip_op_desc.set_output("Out", [grad_name])
-        clip_op_desc.set_attr("min", self.min)
-        clip_op_desc.set_attr("max", self.max)
+        clip_op_desc._set_attr("min", self.min)
+        clip_op_desc._set_attr("max", self.max)


 def error_clip_callback(block, context):
python/paddle/fluid/framework.py

@@ -37,11 +37,9 @@ from . import unique_name
 __all__ = [
     'Program',
-    'Operator',
     'default_startup_program',
     'default_main_program',
     'program_guard',
-    'get_var',
     'name_scope',
 ]
@@ -654,11 +652,11 @@ class Operator(object):
             self._update_desc_attr(attr_name, attr_val)

         self.desc.check_attrs()
-        if self.has_kernel(type):
+        if self._has_kernel(type):
             self.desc.infer_var_type(self.block.desc)
             self.desc.infer_shape(self.block.desc)

-    def has_kernel(self, op_type):
+    def _has_kernel(self, op_type):
         return op_type not in self.OP_WITHOUT_KERNEL_SET

     def to_string(self, throw_on_error):
@@ -699,7 +697,7 @@ class Operator(object):
         """
         return self.desc.input(name)

-    def rename_input(self, old_name, new_name):
+    def _rename_input(self, old_name, new_name):
         """
         Rename the `old_name` to `new_name`.
@@ -710,9 +708,9 @@ class Operator(object):
         Returns:
             None
         """
-        self.desc.rename_input(old_name, new_name)
+        self.desc._rename_input(old_name, new_name)

-    def rename_output(self, old_name, new_name):
+    def _rename_output(self, old_name, new_name):
         """
         Rename the `old_name` to `new_name`.
@@ -723,7 +721,7 @@ class Operator(object):
         Returns:
             None
         """
-        self.desc.rename_output(old_name, new_name)
+        self.desc._rename_output(old_name, new_name)

     @property
     def input_names(self):
@@ -787,7 +785,7 @@ class Operator(object):
         """
         return self.desc.attr_type(name)

-    def set_attr(self, name, val):
+    def _set_attr(self, name, val):
         """
         Set the value of attribute by attribute's name.
@@ -820,7 +818,7 @@ class Operator(object):
                 isinstance(val, core.ProgramDesc):
             self.desc.set_serialized_attr(name, val.serialize_to_string())
         else:
-            self.desc.set_attr(name, val)
+            self.desc._set_attr(name, val)

     @property
     def attr_names(self):
@@ -839,7 +837,7 @@ class Operator(object):
         """
         return self.desc.attr(name)

-    def block_attr_id(self, name):
+    def _block_attr_id(self, name):
         """
         Get the block attribute's id by name.
@@ -849,9 +847,9 @@ class Operator(object):
         Returns:
             int: the block index.
         """
-        return self.desc.block_attr_id(name)
+        return self.desc._block_attr_id(name)

-    def block_attr(self, name):
+    def _block_attr(self, name):
         """
         Get the block attribute by name.
@@ -862,11 +860,11 @@ class Operator(object):
             block: the block attribute.
         """
-        id = self.block_attr_id(name)
+        id = self._block_attr_id(name)
         assert (id >= 0 and id < len(self.block.program.blocks))
         return self.block.program.blocks[id]

-    def blocks_attr(self, name):
+    def _blocks_attr(self, name):
         """
         Get the blocks attribute by name.
@@ -877,13 +875,13 @@ class Operator(object):
             list: list of the blocks attribute.
         """
         attrs = []
-        for i in self.blocks_attr_ids(name):
+        for i in self._blocks_attr_ids(name):
             assert (i >= 0 and i < len(self.block.program.blocks))
             attrs.append(self.block.program.blocks[i])

         return attrs

-    def blocks_attr_ids(self, name):
+    def _blocks_attr_ids(self, name):
         """
         Get the blocks attribute's ids by name.
@@ -894,7 +892,7 @@ class Operator(object):
             list: list of the blocks ids.
         """
-        return self.desc.blocks_attr_ids(name)
+        return self.desc._blocks_attr_ids(name)

     def all_attrs(self):
         """
@@ -908,11 +906,11 @@ class Operator(object):
         for n in attr_names:
             attr_type = self.desc.attr_type(n)
             if attr_type == core.AttrType.BLOCK:
-                attr_map[n] = self.block_attr(n)
+                attr_map[n] = self._block_attr(n)
                 continue

             if attr_type == core.AttrType.BLOCKS:
-                attr_map[n] = self.blocks_attr(n)
+                attr_map[n] = self._blocks_attr(n)
                 continue

             attr_map[n] = self.attr(n)
@@ -1786,7 +1784,7 @@ class Program(object):
             for j in six.moves.range(block.op_size()):
                 op = block.op(j)
                 if op.has_attr('is_test'):
-                    op.set_attr('is_test', True)
+                    op._set_attr('is_test', True)
         res.blocks = [
             Block(res, i) for i in six.moves.range(res.desc.num_blocks())
         ]
@@ -2160,7 +2158,7 @@ def program_guard(main_program, startup_program=None):
         switch_startup_program(startup_program)


-def get_var(name, program=None):
+def _get_var(name, program=None):
     """
     Get a variable by name from the global block of a program.
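Together with the __all__ change, these renames take Operator and get_var out of the public 1.0 surface: attribute manipulation is now spelled with a leading underscore. A hedged sketch of the new spelling, patterned on Program._inference_optimize above; the tiny network exists only to have ops to iterate over:

import paddle.fluid as fluid

prog = fluid.Program()
with fluid.program_guard(prog):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.dropout(x, dropout_prob=0.5)

for op in prog.global_block().ops:
    if op.has_attr('is_test'):
        op._set_attr('is_test', True)   # was op.set_attr(...) before this commit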
python/paddle/fluid/layers/detection.py

@@ -284,7 +284,7 @@ def detection_output(loc,
         target_box=loc,
         code_type='decode_center_size')
     compile_shape = scores.shape
-    run_shape = ops.shape(scores)
+    run_shape = nn.shape(scores)
     scores = nn.flatten(x=scores, axis=2)
     scores = nn.softmax(input=scores)
     scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)
@@ -697,7 +697,7 @@ def ssd_loss(location,
         raise ValueError("Only support mining_type == max_negative now.")

     num, num_prior, num_class = confidence.shape
-    conf_shape = ops.shape(confidence)
+    conf_shape = nn.shape(confidence)

     def __reshape_to_2d(var):
         return nn.flatten(x=var, axis=2)
@@ -724,7 +724,7 @@ def ssd_loss(location,
     target_label.stop_gradient = True
     conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
     # 3. Mining hard examples
-    actual_shape = ops.slice(conf_shape, axes=[0], starts=[0], ends=[2])
+    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
     actual_shape.stop_gradient = True
     conf_loss = nn.reshape(
         x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
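The substitution of nn.shape and nn.slice for ops.shape and ops.slice tracks the move of those ops into nn.py (next file). The compile-shape / run-shape pattern used above, shown in isolation as a hedged sketch with illustrative names:

import paddle.fluid as fluid
from paddle.fluid.layers import nn

scores = fluid.layers.data(name='scores', shape=[4, 21], dtype='float32')
compile_shape = scores.shape      # static shape, may contain -1 for the batch dim
run_shape = nn.shape(scores)      # actual shape, produced at run time
out = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)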
python/paddle/fluid/layers/nn.py

@@ -29,110 +29,29 @@ from .. import unique_name
 from functools import reduce

(Note: the __all__ list is reflowed from one name per line into the grouped form below; the only content change is the seven names moved here from ops.py and appended at the end.)

 __all__ = [
+    'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru',
+    'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy',
+    'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', 'conv3d',
+    'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'pool3d',
+    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose',
+    'sequence_expand', 'sequence_expand_as', 'sequence_pad', 'lstm_unit',
+    'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod',
+    'sequence_first_step', 'sequence_last_step', 'dropout', 'split',
+    'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', 'matmul', 'topk',
+    'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', 'nce',
+    'hsigmoid', 'beam_search', 'row_conv', 'multiplex', 'layer_norm',
+    'softmax_with_cross_entropy', 'smooth_l1', 'one_hot',
+    'autoincreased_step_counter', 'reshape', 'squeeze', 'unsqueeze',
+    'lod_reset', 'lrn', 'pad', 'pad_constant_like', 'label_smooth', 'roi_pool',
+    'dice_loss', 'image_resize', 'image_resize_short', 'resize_bilinear',
+    'gather', 'scatter', 'sequence_scatter', 'random_crop', 'mean_iou', 'relu',
+    'log', 'crop', 'rank_loss', 'elu', 'relu6', 'pow', 'stanh', 'hard_sigmoid',
+    'swish', 'prelu', 'brelu', 'leaky_relu', 'soft_relu', 'flatten',
+    'sequence_mask', 'stack', 'pad2d', 'unstack', 'sequence_enumerate',
+    'expand', 'sequence_concat', 'scale', 'elementwise_add', 'elementwise_div',
+    'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min',
+    'elementwise_pow', 'uniform_random_batch_size_like', 'gaussian_random',
+    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
 ]
@@ -6463,6 +6382,246 @@ def expand(x, expand_times, name=None):
     return out


+from paddle.fluid.framework import convert_np_dtype_to_dtype_
+
+
+@templatedoc()
+def uniform_random_batch_size_like(input,
+                                   shape,
+                                   dtype='float32',
+                                   input_dim_idx=0,
+                                   output_dim_idx=0,
+                                   min=-1.0,
+                                   max=1.0,
+                                   seed=0):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+        shape (tuple|list): ${shape_comment}
+        input_dim_idx (Int): ${input_dim_idx_comment}
+        output_dim_idx (Int): ${output_dim_idx_comment}
+        min (Float): ${min_comment}
+        max (Float): ${max_comment}
+        seed (Int): ${seed_comment}
+        dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('uniform_random_batch_size_like', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='uniform_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'min': min,
+            'max': max,
+            'seed': seed,
+            'dtype': c_dtype
+        })
+
+    return out
+
+
+@templatedoc()
+def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32',
+                    use_mkldnn=False):
+    """
+    ${comment}
+
+    Args:
+        shape (tuple|list): ${shape_comment}
+        mean (Float): ${mean_comment}
+        std (Float): ${std_comment}
+        seed (Int): ${seed_comment}
+        dtype(np.dtype|core.VarDesc.VarType|str): Output data type.
+        use_mkldnn (Bool): Only used in mkldnn kernel.
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('gaussian_random', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='gaussian_random',
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': c_dtype,
+            'use_mkldnn': use_mkldnn
+        })
+
+    return out
+
+
+@templatedoc()
+def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
+    """
+    ${comment}
+
+    Args:
+        x (Variable): ${x_comment}
+        min (Float): ${min_comment}
+        max (Float): ${max_comment}
+        seed (Float): ${seed_comment}
+        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data : float32, float_16, int etc
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('sampling_id', **locals())
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type='sampling_id',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'min': min,
+               'max': max,
+               'seed': seed})
+
+    return out
+
+
+@templatedoc()
+def gaussian_random_batch_size_like(input,
+                                    shape,
+                                    input_dim_idx=0,
+                                    output_dim_idx=0,
+                                    mean=0.0,
+                                    std=1.0,
+                                    seed=0,
+                                    dtype='float32'):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+        shape (tuple|list): ${shape_comment}
+        input_dim_idx (Int): ${input_dim_idx_comment}
+        output_dim_idx (Int): ${output_dim_idx_comment}
+        mean (Float): ${mean_comment}
+        std (Float): ${std_comment}
+        seed (Int): ${seed_comment}
+        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data : float32, float_16, int etc
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='gaussian_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': c_dtype
+        })
+
+    return out
+
+
+@templatedoc()
+def sum(x, use_mkldnn=False):
+    """
+    ${comment}
+
+    Args:
+        x (Variable): ${x_comment}
+        use_mkldnn (Bool): ${use_mkldnn_comment}
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('sum', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
+    helper.append_op(
+        type='sum',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'use_mkldnn': use_mkldnn})
+
+    return out
+
+
+@templatedoc()
+def slice(input, axes, starts, ends):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}.
+        axes (List): ${axes_comment}
+        starts (List): ${starts_comment}
+        ends (List): ${ends_comment}
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('slice', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    helper.append_op(
+        type='slice',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={'axes': axes,
+               'starts': starts,
+               'ends': ends})
+
+    return out
+
+
+@templatedoc()
+def shape(input):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('shape', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    helper.append_op(
+        type='shape', inputs={'Input': input}, outputs={'Out': out})
+
+    return out
+
+
 def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
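For orientation, a hedged usage sketch of the layers added above, mirroring the new unit tests later in this diff; shapes and names are illustrative:

import paddle.fluid as fluid

x = fluid.layers.data(
    name='X', shape=[13, 11], dtype='float32', append_batch_size=False)

ids = fluid.layers.sampling_id(x)          # one sampled id per row of x
noise = fluid.layers.gaussian_random(shape=[20, 30], mean=0.0, std=1.0)
like = fluid.layers.uniform_random_batch_size_like(x, shape=[-1, 11])
total = fluid.layers.sum(x)
part = fluid.layers.slice(x, axes=[0, 1], starts=[1, 0], ends=[3, 3])
dims = fluid.layers.shape(x)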
python/paddle/fluid/layers/ops.py

@@ -45,13 +45,6 @@ __all__ = [
     'logical_or',
     'logical_xor',
     'logical_not',
-    'uniform_random_batch_size_like',
-    'gaussian_random',
-    'sampling_id',
-    'gaussian_random_batch_size_like',
-    'sum',
-    'slice',
-    'shape',
     'maxout',
 ]
python/paddle/fluid/tests/unittests/dist_transformer.py

@@ -1488,7 +1488,7 @@ def wrap_decoder(trg_vocab_size,
     if weight_sharing:
         predict = layers.matmul(
             x=dec_output,
-            y=fluid.get_var(word_emb_param_names[0]),
+            y=fluid.framework._get_var(word_emb_param_names[0]),
             transpose_y=True)
     else:
         predict = layers.fc(input=dec_output,
python/paddle/fluid/tests/unittests/test_dist_transpiler.py

@@ -264,6 +264,25 @@ class TestLRDecay(TranspilerTest):
         ])


+class TestDecayedAdagrad(TranspilerTest):
+    def net_conf(self):
+        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
+        y_predict = fluid.layers.fc(input=x,
+                                    size=1000,
+                                    act=None,
+                                    param_attr=fluid.ParamAttr(name='fc_w'),
+                                    bias_attr=fluid.ParamAttr(name='fc_b'))
+        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+        avg_cost = fluid.layers.mean(cost)
+        opt = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
+        opt.minimize(avg_cost)
+
+    def transpiler_test_impl(self):
+        pserver, startup = self.get_pserver(self.pserver1_ep)
+        trainer, _ = self.get_trainer()
+
+
 class TestLRDecayConditional(TranspilerTest):
     def net_conf(self):
         x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
python/paddle/fluid/tests/unittests/test_infer_shape.py

@@ -76,8 +76,8 @@ class TestInferShape(unittest.TestCase):
         mul_op_desc.set_input("X", ["x"])
         mul_op_desc.set_input("Y", ["y"])
         mul_op_desc.set_output("Out", ["out"])
-        mul_op_desc.set_attr("x_num_col_dims", 1)
-        mul_op_desc.set_attr("y_num_col_dims", 1)
+        mul_op_desc._set_attr("x_num_col_dims", 1)
+        mul_op_desc._set_attr("y_num_col_dims", 1)

         mul_op_desc.check_attrs()
         mul_op_desc.infer_shape(block)
python/paddle/fluid/tests/unittests/test_layers.py

@@ -541,7 +541,7 @@ class TestBook(unittest.TestCase):
         with program_guard(program):
             input = layers.data(
                 name="input", shape=[3, 100, 100], dtype="float32")
-            out = layers.shape(input, name="shape")
+            out = layers.shape(input)
             self.assertIsNotNone(out)
         print(str(program))
@@ -758,6 +758,65 @@ class TestBook(unittest.TestCase):
             out = layers.expand(x, [1, 2])
         print(str(program))

+    def test_uniform_random_batch_size_like(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+            out = layers.uniform_random_batch_size_like(input, [-1, 11])
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_gaussian_random(self):
+        program = Program()
+        with program_guard(program):
+            out = layers.gaussian_random(shape=[20, 30])
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sampling_id(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(
+                name="X",
+                shape=[13, 11],
+                dtype='float32',
+                append_batch_size=False)
+
+            out = layers.sampling_id(x)
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_gaussian_random_batch_size_like(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+
+            out = layers.gaussian_random_batch_size_like(
+                input, shape=[-1, 11], mean=1.0, std=2.0)
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sum(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+
+            out = layers.sum(input)
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_slice(self):
+        starts = [1, 0, 2]
+        ends = [3, 3, 4]
+        axes = [0, 1, 2]
+
+        program = Program()
+        with program_guard(program):
+            input = layers.data(
+                name="input", shape=[3, 4, 5, 6], dtype='float32')
+
+            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
+
     def test_softshrink(self):
         program = Program()
         with program_guard(program):
python/paddle/fluid/tests/unittests/test_protobuf_descs.py

@@ -38,40 +38,40 @@ class TestOpDesc(unittest.TestCase):
         self.assertEqual(['z'], op.output("Out"))
         self.assertEqual(["Out"], op.output_names())

-        op.set_attr("int_attr", 1)
+        op._set_attr("int_attr", 1)
         self.assertEqual(1, op.attr("int_attr"))
         self.assertTrue(op.has_attr("int_attr"))
         self.assertEqual(core.AttrType.INT, op.attr_type("int_attr"))

-        op.set_attr("float_attr", -1.32)
+        op._set_attr("float_attr", -1.32)
         self.assertAlmostEqual(-1.32, op.attr("float_attr"), delta=1e-4)
         self.assertTrue(op.has_attr("float_attr"))

-        op.set_attr("bool_attr", False)
+        op._set_attr("bool_attr", False)
         self.assertFalse(op.attr("bool_attr"))

-        op.set_attr("string_attr", "abc")
+        op._set_attr("string_attr", "abc")
         self.assertEqual("abc", op.attr("string_attr"))
         self.assertTrue(op.has_attr("string_attr"))

-        op.set_attr("ints_attr", [1, 2, 3])
+        op._set_attr("ints_attr", [1, 2, 3])
         self.assertEqual([1, 2, 3], op.attr("ints_attr"))

         expected = [1.2, 2.3, 3.4]
-        op.set_attr("floats_attr", expected)
+        op._set_attr("floats_attr", expected)
         for e, a in zip(expected, op.attr("floats_attr")):
             self.assertAlmostEqual(e, a, delta=1e-4)

-        op.set_attr("strings_attr", ["a", "b", "c"])
+        op._set_attr("strings_attr", ["a", "b", "c"])
         self.assertEqual(["a", "b", "c"], op.attr("strings_attr"))

-        op.set_attr("bools_attr", [True, False, True])
+        op._set_attr("bools_attr", [True, False, True])
         self.assertEqual([True, False, True], op.attr("bools_attr"))
         self.assertEqual(8, len(op.attr_names()))

-        op.set_block_attr("block_attr", program_desc.block(0))
-        self.assertEqual(0, op.block_attr_id("block_attr"))
+        op.set_block_attr("_block_attr", program_desc.block(0))
+        self.assertEqual(0, op._block_attr_id("_block_attr"))

         mul_op = block.append_op()
         mul_op.set_type("mul")
python/paddle/fluid/transpiler/details/program_utils.py

@@ -128,7 +128,7 @@ def op_to_code(op):
             attr_type = op.desc.attr_type(name)
             if attr_type == core.AttrType.BLOCK:
                 a = "{name} = block[{value}]".format(
-                    name=name, type=attr_type, value=op.block_attr_id(name))
+                    name=name, type=attr_type, value=op._block_attr_id(name))
                 attrs_str += a
                 if i != len(attr_names) - 1:
                     attrs_str += ", "
@@ -136,7 +136,7 @@ def op_to_code(op):
             if attr_type == core.AttrType.BLOCKS:
                 a = "{name} = blocks{value}".format(
-                    name=name, type=attr_type, value=op.blocks_attr_ids(name))
+                    name=name, type=attr_type, value=op._blocks_attr_ids(name))
                 attrs_str += a
                 if i != len(attr_names) - 1:
                     attrs_str += ", "
python/paddle/fluid/transpiler/distribute_transpiler.py

@@ -470,7 +470,10 @@ class DistributeTranspiler(object):
         """
         # remove optimize ops and add a send op to main_program
         # FIXME(typhoonzero): Also ops like clip_gradient, lrn_decay?
+        lr_ops = self._get_lr_ops()
         delete_ops(self.origin_program.global_block(), self.optimize_ops)
+        delete_ops(self.origin_program.global_block(), lr_ops)
+
         self.origin_program.__str__()

         if wait_port:
@@ -668,7 +671,7 @@ in a single call.")
                 __clone_lr_op_sub_block__(cloned_op, program, new_sub_block)

             # reset the block of op
-            op.set_attr('sub_block', new_sub_block)
+            op._set_attr('sub_block', new_sub_block)

         # append lr decay ops to the child block if exists
         lr_ops = self._get_lr_ops()
@@ -862,7 +865,7 @@ to transpile() call.")
                 if op.type in [
                         "gaussian_random", "fill_constant", "uniform_random"
                 ]:
-                    op.set_attr("shape", list(new_outputs["Out"].shape))
+                    op._set_attr("shape", list(new_outputs["Out"].shape))
                 s_prog.global_block().append_op(
                     type=op.type,
                     inputs=new_inputs,
@@ -1428,6 +1431,9 @@ to transpile() call.")
         elif op_type == "rmsprop":
             if varkey in ["Moment", "MeanSquare"]:
                 return param_shape
+        elif op_type == "decayed_adagrad":
+            if varkey == "Moment":
+                return param_shape
         elif op_type == "sgd":
             pass
         return orig_shape
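The new decayed_adagrad branch parallels the rmsprop case: its Moment accumulator must follow the parameter's (split) shape when variables are distributed across parameter servers. A hedged, condensed sketch of the dispatch as a standalone function; the real code is a method of DistributeTranspiler:

def _get_optimizer_input_shape(op_type, varkey, orig_shape, param_shape):
    # Accumulators that mirror the parameter adopt its shape;
    # everything else keeps its original shape.
    if op_type == "rmsprop" and varkey in ["Moment", "MeanSquare"]:
        return param_shape
    elif op_type == "decayed_adagrad" and varkey == "Moment":
        return param_shape
    return orig_shape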
python/paddle/fluid/transpiler/inference_transpiler.py

@@ -163,7 +163,7 @@ class InferenceTranspiler(object):
                 next_op = self.block.ops[i + 1]
                 if next_op.type == 'relu':
                     # modify bnorm OP to include relu
-                    current_op.set_attr("fuse_with_relu", True)
+                    current_op._set_attr("fuse_with_relu", True)
                     # remove relu OP
                     self.block._remove_op(i + 1)
             i = i + 1
@@ -377,7 +377,7 @@ class InferenceTranspiler(object):
             type=old_var.type,
             dtype=old_var.dtype,
             shape=old_var.shape)

-        op.rename_input(old_param_name, new_param_name)
+        op._rename_input(old_param_name, new_param_name)
         self.scope.var(new_param_name)

         tensor = self.scope.find_var(new_param_name).get_tensor()
@@ -463,8 +463,8 @@ class InferenceTranspiler(object):
             current_op = self.block.ops[i]
             for input_arg in current_op.input_arg_names:
                 if input_arg in self.input_map:
-                    current_op.rename_input(input_arg,
-                                            self.input_map[input_arg])
+                    current_op._rename_input(input_arg,
+                                             self.input_map[input_arg])

     def _remove_unused_var(self):
         '''