Commit d243e555 (unverified) in s920243400 / PaddleDetection, forked from PaddlePaddle / PaddleDetection

Merge pull request #15080 from jacquesqiao/optimize-assign

Optimize assign

Authored on Jan 23, 2019 by 乔龙飞 Qiao Longfei; committed via GitHub on Jan 23, 2019.
Parents: dbdaf15c, 6833ec06
Showing 4 changed files with 98 additions and 10 deletions (+98, -10):
paddle/fluid/API.spec (+1, -0)
python/paddle/fluid/initializer.py (+60, -1)
python/paddle/fluid/layers/nn.py (+16, -9)
python/paddle/fluid/tests/unittests/test_initializer.py (+21, -0)
paddle/fluid/API.spec

@@ -67,6 +67,7 @@ paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], var
 paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
 paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
 paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))
 paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None))
 ...
python/paddle/fluid/initializer.py

@@ -24,7 +24,8 @@ __all__ = [
     'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
     'MSRA', 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer',
     'UniformInitializer', 'NormalInitializer', 'TruncatedNormalInitializer',
-    'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer'
+    'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer',
+    'NumpyArrayInitializer'
 ]
 
 _force_init_on_cpu_ = False
 ...
@@ -683,6 +684,64 @@ class BilinearInitializer(Initializer):
         return op
 
 
+class NumpyArrayInitializer(Initializer):
+    """Init an parameter with an numpy array
+
+    Args:
+        value (numpy): numpy array to initialize the variable
+
+    Examples:
+        .. code-block:: python
+
+            fc = fluid.layers.fc(input=x, size=10,
+                param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
+    """
+
+    def __init__(self, value):
+        import numpy
+        assert isinstance(value, numpy.ndarray)
+        super(NumpyArrayInitializer, self).__init__()
+        self._value = value
+
+    def __call__(self, var, block):
+        """Add constant initialization ops for a variable
+
+        Args:
+            var: Variable that needs to be initialized
+            block: The block in which initialization ops
+                   should be added
+
+        Returns:
+            the initialization op
+        """
+        assert isinstance(var, framework.Variable)
+        assert isinstance(block, framework.Block)
+        # Initialization Ops should be prepended and not appended
+        dtype = framework.convert_np_dtype_to_dtype_(self._value.dtype)
+        if dtype == VarDesc.VarType.FP32:
+            value_name = "fp32_values"
+            values = [float(v) for v in self._value.flat]
+        elif dtype == VarDesc.VarType.INT32:
+            value_name = "int32_values"
+            values = [int(v) for v in self._value.flat]
+        else:
+            raise ValueError("Unsupported dtype %s", self._value.dtype)
+        if self._value.size > 1024 * 1024 * 5:
+            raise ValueError("The size of input is too big. Please consider "
+                             "saving it to file and 'load_op' to load it")
+        op = block._prepend_op(
+            type='assign_value',
+            outputs={'Out': var},
+            attrs={
+                'dtype': dtype,
+                'shape': list(self._value.shape),
+                value_name: values
+            },
+            stop_gradient=True)
+        var.op = op
+        return op
+
+
 # We short the class name, since users will use the initializer with the package
 # name. The sample code:
 #
 ...
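The docstring above shows the intended call pattern. Below is a minimal, self-contained sketch of the same idea, assuming the Paddle Fluid 1.x API of this commit; the names x, startup, and main are illustrative and not part of the change.

import numpy as np
import paddle.fluid as fluid

startup = fluid.Program()
main = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[2], dtype='float32')
    # NumpyArrayInitializer prepends an assign_value op to the startup program,
    # so the fc weight holds these values before training starts.
    init = fluid.initializer.NumpyArrayInitializer(
        np.ones((2, 10), dtype=np.float32))
    y = fluid.layers.fc(input=x, size=10,
                        param_attr=fluid.ParamAttr(initializer=init))

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)  # executes the prepended assign_value op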
python/paddle/fluid/layers/nn.py

@@ -22,7 +22,7 @@ import six
 import os
 import inspect
 from ..layer_helper import LayerHelper
-from ..initializer import Normal, Constant
+from ..initializer import Normal, Constant, NumpyArrayInitializer
 from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 ...
@@ -5181,14 +5181,21 @@ def nce(input,
         alias_probs_[little[0]] = 1.0
         alias_[little[0]] = -1
 
-        probs = assign(input=np.array(custom_dist).astype('float32'))
-        custom_alias = assign(input=np.array(alias_).astype('int32'))
-        custom_alias_probs = assign(
-            input=np.array(alias_probs_).astype('float32'))
-
-        inputs['CustomDistProbs'] = probs
-        inputs['CustomDistAlias'] = custom_alias
-        inputs['CustomDistAliasProbs'] = custom_alias_probs
+        def _init_by_numpy_array(numpy_array):
+            ret = helper.create_parameter(
+                attr=ParamAttr(),
+                shape=numpy_array.shape,
+                dtype=numpy_array.dtype,
+                default_initializer=NumpyArrayInitializer(numpy_array))
+            ret.stop_gradient = True
+            return ret
+
+        inputs['CustomDistProbs'] = _init_by_numpy_array(
+            np.array(custom_dist).astype('float32'))
+        inputs['CustomDistAlias'] = _init_by_numpy_array(
+            np.array(alias_).astype('int32'))
+        inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
+            np.array(alias_probs_).astype('float32'))
         sampler = 2
     else:
         raise Exception("Unsupported sampler type.")
 ...
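For context: the hunk above replaces three assign layers, which copied the custom sampling tables into the main program every time nce was built, with non-trainable parameters whose values are written once by NumpyArrayInitializer in the startup program. A minimal sketch of the same pattern outside nce, assuming the public fluid.layers.create_parameter API of this release; the array alias_probs is illustrative stand-in data.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer

alias_probs = np.random.random(16).astype('float32')  # stand-in sampling table

startup = fluid.Program()
main = fluid.Program()
with fluid.program_guard(main, startup):
    # The table becomes a parameter filled by a single assign_value op at
    # startup, mirroring what _init_by_numpy_array does inside nce.
    table = fluid.layers.create_parameter(
        shape=list(alias_probs.shape),
        dtype='float32',
        default_initializer=NumpyArrayInitializer(alias_probs))
    table.stop_gradient = True  # constants, not trainable weights

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)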
python/paddle/fluid/tests/unittests/test_initializer.py

@@ -420,5 +420,26 @@ class TestMSRAInitializer(unittest.TestCase):
         self.assertEqual(init_op.type, 'assign_value')
 
 
+class TestNumpyArrayInitializer(unittest.TestCase):
+    def test_numpy_array_initializer(self):
+        """Test the numpy array initializer with supplied arguments
+        """
+        import numpy
+        program = framework.Program()
+        block = program.global_block()
+        np_array = numpy.random.random((10000)).astype("float32")
+        for _ in range(2):
+            block.create_parameter(
+                dtype=np_array.dtype,
+                shape=np_array.shape,
+                lod_level=0,
+                name="param",
+                initializer=initializer.NumpyArrayInitializer(np_array))
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'assign_value')
+        assert (init_op.attr('fp32_values') == np_array).all()
+
+
 if __name__ == '__main__':
     unittest.main()
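To run only the new case after applying this change, a sketch along these lines should work; it assumes Paddle is importable and the working directory is python/paddle/fluid/tests/unittests so that test_initializer is on the import path.

import unittest

# Load and run just TestNumpyArrayInitializer.test_numpy_array_initializer.
suite = unittest.defaultTestLoader.loadTestsFromName(
    'test_initializer.TestNumpyArrayInitializer.test_numpy_array_initializer')
unittest.TextTestRunner(verbosity=2).run(suite)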