Unverified commit d243e555, authored by 乔龙飞 Qiao Longfei, committed by GitHub

Merge pull request #15080 from jacquesqiao/optimize-assign

Optimize assign
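
In outline, the change replaces the assign(input=np.array(...)) calls inside nce with non-trainable parameters created once through a new NumpyArrayInitializer, so the sampling tables are baked in at graph-build time instead of being copied by assign ops. A minimal sketch of the resulting pattern (the data and variable names below are illustrative, not taken from this PR):

import numpy as np
import paddle.fluid as fluid

# Illustrative data; any float32/int32 numpy array works the same way.
dist = np.array([0.2, 0.3, 0.5], dtype='float32')

# Bake the array into a non-trainable parameter at graph-build time
# instead of emitting an assign op on every call.
probs = fluid.layers.create_parameter(
    shape=list(dist.shape),
    dtype='float32',
    default_initializer=fluid.initializer.NumpyArrayInitializer(dist))
probs.stop_gradient = True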
......@@ -67,6 +67,7 @@ paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], var
paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))
paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None))
......
......@@ -24,7 +24,8 @@ __all__ = [
'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
'MSRA', 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer',
'UniformInitializer', 'NormalInitializer', 'TruncatedNormalInitializer',
'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer'
'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer',
'NumpyArrayInitializer'
]
_force_init_on_cpu_ = False
......@@ -683,6 +684,64 @@ class BilinearInitializer(Initializer):
return op
class NumpyArrayInitializer(Initializer):
    """Init a parameter with a numpy array.

    Args:
        value (numpy.ndarray): numpy array used to initialize the variable

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.NumpyArrayInitializer(
                    numpy.array([1, 2]).astype('float32')))
    """
def __init__(self, value):
import numpy
assert isinstance(value, numpy.ndarray)
super(NumpyArrayInitializer, self).__init__()
self._value = value
    def __call__(self, var, block):
        """Add assign_value initialization ops for a variable

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops
                   should be added

        Returns:
            the initialization op
        """
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
# Initialization Ops should be prepended and not appended
dtype = framework.convert_np_dtype_to_dtype_(self._value.dtype)
if dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in self._value.flat]
elif dtype == VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in self._value.flat]
        else:
            raise ValueError("Unsupported dtype %s" % self._value.dtype)
        if self._value.size > 1024 * 1024 * 5:
            raise ValueError("The size of the input array is too big. Please "
                             "consider saving it to a file and using load_op "
                             "to load it")
op = block._prepend_op(
type='assign_value',
outputs={'Out': var},
attrs={
'dtype': dtype,
'shape': list(self._value.shape),
value_name: values
},
stop_gradient=True)
var.op = op
return op
# We shorten the class name, since users will use the initializer with the
# package name. Sample code:
#
......
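
For reference, a hedged end-to-end usage sketch of the new initializer (shapes and variable names here are illustrative and assume the Fluid 1.x API shown in this diff):

import numpy
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[5], dtype='float32')
# The weight of an fc layer mapping 5 inputs to 10 outputs has shape (5, 10).
w = numpy.random.random((5, 10)).astype('float32')
fc = fluid.layers.fc(
    input=x,
    size=10,
    param_attr=fluid.ParamAttr(
        initializer=fluid.initializer.NumpyArrayInitializer(w)))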
......@@ -22,7 +22,7 @@ import six
import os
import inspect
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
......@@ -5181,14 +5181,21 @@ def nce(input,
alias_probs_[little[0]] = 1.0
alias_[little[0]] = -1
probs = assign(input=np.array(custom_dist).astype('float32'))
custom_alias = assign(input=np.array(alias_).astype('int32'))
custom_alias_probs = assign(
input=np.array(alias_probs_).astype('float32'))
inputs['CustomDistProbs'] = probs
inputs['CustomDistAlias'] = custom_alias
inputs['CustomDistAliasProbs'] = custom_alias_probs
def _init_by_numpy_array(numpy_array):
ret = helper.create_parameter(
attr=ParamAttr(),
shape=numpy_array.shape,
dtype=numpy_array.dtype,
default_initializer=NumpyArrayInitializer(numpy_array))
ret.stop_gradient = True
return ret
inputs['CustomDistProbs'] = _init_by_numpy_array(
np.array(custom_dist).astype('float32'))
inputs['CustomDistAlias'] = _init_by_numpy_array(
np.array(alias_).astype('int32'))
inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
np.array(alias_probs_).astype('float32'))
sampler = 2
else:
raise Exception("Unsupported sampler type.")
......
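
The hunk above sits in the sampler == 'custom_dist' branch of nce; a hedged call sketch that would exercise it (the nce signature and data layout are assumed from this Fluid version):

import paddle.fluid as fluid

emb = fluid.layers.data(name='emb', shape=[32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
cost = fluid.layers.nce(
    input=emb,
    label=label,
    num_total_classes=4,
    num_neg_samples=2,
    sampler='custom_dist',
    custom_dist=[0.1, 0.2, 0.3, 0.4])  # one probability per class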
......@@ -420,5 +420,26 @@ class TestMSRAInitializer(unittest.TestCase):
self.assertEqual(init_op.type, 'assign_value')
class TestNumpyArrayInitializer(unittest.TestCase):
def test_numpy_array_initializer(self):
"""Test the numpy array initializer with supplied arguments
"""
import numpy
program = framework.Program()
block = program.global_block()
        np_array = numpy.random.random((10000, )).astype("float32")
for _ in range(2):
block.create_parameter(
dtype=np_array.dtype,
shape=np_array.shape,
lod_level=0,
name="param",
initializer=initializer.NumpyArrayInitializer(np_array))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'assign_value')
assert (init_op.attr('fp32_values') == np_array).all()
if __name__ == '__main__':
unittest.main()