提交 ff478417 编写于 作者: G gongweibao

fix

上级 8f09109a
......@@ -53,9 +53,9 @@ class SamplingIdOpMaker : public framework::OpProtoAndCheckerMaker {
SamplingId Operator.
A layer for sampling id from multinomial distribution from the
input. Sampling one id for one sample.)DOC");
AddAttr<float>("min", "Minimum value of random. [default 0.0].")
AddAttr<float>("min", "Minimum value of random. (float, default 0.0).")
.SetDefault(0.0f);
AddAttr<float>("max", "Maximun value of random. [default 1.0].")
AddAttr<float>("max", "Maximun value of random. (float, default 1.0).")
.SetDefault(1.0f);
AddAttr<int>("seed",
"Random seed used for the random number engine. "
......
......@@ -47,7 +47,8 @@ __all__ = [
'gather', 'scatter', 'sequence_scatter', 'random_crop', 'mean_iou', 'relu',
'log', 'crop', 'rank_loss', 'prelu', 'flatten', 'sequence_mask', 'stack',
'pad2d', 'unstack', 'sequence_enumerate', 'expand', 'sequence_concat',
'uniform_random_batch_size_like'
'uniform_random_batch_size_like', 'gaussian_random', 'sampling_id',
'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
]
......@@ -6195,7 +6196,7 @@ def uniform_random_batch_size_like(input,
Note that if seed is not 0, this operator will always generate the same random numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc
Returns:
output(Variable): Output of this operator.
out (Variable): Output of this operator.
"""
......@@ -6217,3 +6218,230 @@ def uniform_random_batch_size_like(input,
})
return out
def gaussian_random(shape,
                    mean=0.0,
                    std=1.0,
                    seed=0,
                    dtype='float32',
                    use_mkldnn=False):
    """Create a tensor whose values are drawn from a Gaussian distribution.

    Wraps the ``gaussian_random`` operator, typically used to initialize
    tensors with random values.

    Args:
        shape (tuple|list): Dimensions of the random tensor.
        mean (float): Mean of the Gaussian distribution. Default 0.0.
        std (float): Standard deviation of the distribution. Default 1.0.
        seed (int): Seed for the random number generator. 0 means a
            system-wide seed is used; note that a non-zero seed makes the
            operator generate the same numbers on every run.
        dtype (np.dtype|core.VarDesc.VarType|str): Output data type.
        use_mkldnn (bool): Whether the MKLDNN kernel is used.

    Returns:
        Variable: Output variable of the ``gaussian_random`` op.
    """
    # ``**locals()`` must be captured before any extra locals are created,
    # so the helper is built first with only the call arguments in scope.
    helper = LayerHelper('gaussian_random', **locals())
    result = helper.create_tmp_variable(dtype)
    op_attrs = {
        'shape': shape,
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': convert_np_dtype_to_dtype_(dtype),
        'use_mkldnn': use_mkldnn,
    }
    helper.append_op(
        type='gaussian_random', outputs={'Out': result}, attrs=op_attrs)
    return result
def sampling_id(x, min=0.0, max=1.0, seed=0):
    """Sample one id per row from a multinomial distribution.

    Wraps the ``sampling_id`` operator: for every sample (row) of the
    input it draws a single id.

    Args:
        x (Variable): Input tensor, 2-D with shape
            [batch_size, input_feature_dimensions] (e.g. a softmax output).
        min (float): Minimum value of the random number. Default 0.0.
        max (float): Maximum value of the random number. Default 1.0.
        seed (int): Seed for the random number engine. 0 means a
            system-generated seed; a non-zero seed always produces the
            same random numbers.

    Returns:
        Variable: Output variable of the ``sampling_id`` op.
    """
    helper = LayerHelper('sampling_id', **locals())
    # Output dtype follows the input; 'x' is the kwargs key captured above.
    result = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
    op_attrs = {'min': min, 'max': max, 'seed': seed}
    helper.append_op(
        type='sampling_id',
        inputs={'X': x},
        outputs={'Out': result},
        attrs=op_attrs)
    return result
def gaussian_random_batch_size_like(input,
                                    shape,
                                    input_dim_idx=0,
                                    output_dim_idx=0,
                                    mean=0.0,
                                    std=1.0,
                                    seed=0,
                                    dtype='float32'):
    """Create a Gaussian-random tensor whose batch size follows ``input``.

    Initializes a tensor with values from a Gaussian distribution. The
    default mean is 0.0 and the default standard deviation is 1.0; both
    can be overridden through the arguments.

    Args:
        input (Variable): Tensor whose ``input_dim_idx``-th dimension
            supplies the batch size.
        shape (tuple|list): Shape of the output tensor.
        input_dim_idx (int): Index of the input's batch-size dimension.
        output_dim_idx (int): Index of the output's batch-size dimension.
        mean (float): Mean (center) of the Gaussian distribution.
        std (float): Standard deviation (spread) of the distribution.
        seed (int): Seed of the generator. 0 means a system-wide seed;
            a non-zero seed always produces the same random numbers.
        dtype (np.dtype|core.VarDesc.VarType|str): Output data type.

    Returns:
        Variable: Output variable of the op.
    """
    # Helper is created first so ``**locals()`` carries only the arguments.
    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
    result = helper.create_tmp_variable(dtype)
    op_attrs = {
        'shape': shape,
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': convert_np_dtype_to_dtype_(dtype),
    }
    helper.append_op(
        type='gaussian_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': result},
        attrs=op_attrs)
    return result
def sum(x, use_mkldnn=False):
    """Sum the input tensors element-wise.

    Wraps the ``sum`` operator. All inputs can carry LoD (Level of
    Details) information, but the output only shares the LoD information
    with the first input.

    Args:
        x (Variable): The input tensor(s) of the sum operator.
        use_mkldnn (bool): Only used by the MKLDNN kernel.

    Returns:
        Variable: Output variable of the ``sum`` op.
    """
    helper = LayerHelper('sum', **locals())
    # BUGFIX: the kwargs captured by **locals() are keyed by the Python
    # parameter name 'x' (lowercase) — 'X' never matched, so the dtype
    # lookup failed. The sibling sampling_id layer uses 'x' correctly.
    out = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
    helper.append_op(
        type='sum',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'use_mkldnn': use_mkldnn})
    return out
def slice(input, axes, starts, ends):
    """Produce a slice of the input tensor along multiple axes.

    Similar to numpy indexing:
    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html

    Uses the ``axes``, ``starts`` and ``ends`` attributes to specify the
    start and end dimension for each axis in the list of axes. A negative
    start/end index counts from the end of that dimension; a value larger
    than n (the number of elements in the dimension) is clamped to n. To
    slice to the end of a dimension of unknown size, pass INT_MAX. If
    ``axes`` are omitted they are set to [0, ..., ndim-1].

    Examples:
        .. code-block:: text

            Case 1:
                Given:
                    data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                    axes = [0, 1]
                    starts = [1, 0]
                    ends = [2, 3]
                Then:
                    result = [ [5, 6, 7], ]
            Case 2:
                Given:
                    data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                    starts = [0, 1]
                    ends = [-1, 1000]
                Then:
                    result = [ [2, 3, 4], ]

    Args:
        input (Variable): Tensor to extract slices from.
        axes (list): Axes that ``starts`` and ``ends`` apply to. Optional;
            if not present, treated as [0, 1, ..., len(starts) - 1].
        starts (list): Starting indices of the corresponding axes.
        ends (list): Ending indices of the corresponding axes.

    Returns:
        Variable: Output variable of the ``slice`` op.
    """
    helper = LayerHelper('slice', **locals())
    # BUGFIX: the kwargs captured by **locals() are keyed by the Python
    # parameter name 'input' (lowercase, LayerHelper's default key);
    # 'Input' never matched, so the dtype lookup failed.
    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
    helper.append_op(
        type='slice',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={'axes': axes,
               'starts': starts,
               'ends': ends})
    return out
def shape(input):
    """Get the shape of the input tensor.

    Wraps the ``shape`` operator. Only CPU input tensors are supported
    at the moment.

    Args:
        input (Variable): The input tensor.

    Returns:
        Variable: Output variable of the ``shape`` op.
    """
    helper = LayerHelper('shape', **locals())
    # BUGFIX: the kwargs captured by **locals() are keyed by the Python
    # parameter name 'input' (lowercase); 'Input' never matched, so the
    # dtype lookup failed.
    # NOTE(review): the output holds the shape, which is integral — using
    # the input's dtype mirrors the generated code; confirm the op does
    # not require an int32 output variable instead.
    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
    helper.append_op(
        type='shape', inputs={'Input': input}, outputs={'Out': out}, attrs={})
    return out
......@@ -62,12 +62,6 @@ __all__ = [
'logical_or',
'logical_xor',
'logical_not',
'gaussian_random',
'sampling_id',
'gaussian_random_batch_size_like',
'sum',
'slice',
'shape',
'maxout',
] + __activations__
......
......@@ -541,7 +541,7 @@ class TestBook(unittest.TestCase):
with program_guard(program):
input = layers.data(
name="input", shape=[3, 100, 100], dtype="float32")
out = layers.shape(input, name="shape")
out = layers.shape(input)
self.assertIsNotNone(out)
print(str(program))
......@@ -597,11 +597,54 @@ class TestBook(unittest.TestCase):
print(str(program))
def test_uniform_random_batch_size_like(self):
    # The layer should build inside a fresh program and yield a variable.
    prog = Program()
    with program_guard(prog):
        data = layers.data(name="input", shape=[13, 11], dtype='float32')
        result = layers.uniform_random_batch_size_like(data, [-1, 11])
        self.assertIsNotNone(result)
def test_gaussian_random(self):
    # gaussian_random takes no input variable, only a target shape.
    prog = Program()
    with program_guard(prog):
        result = layers.gaussian_random(shape=[20, 30])
        self.assertIsNotNone(result)
def test_sampling_id(self):
    # sampling_id should accept a 2-D float input and produce an output var.
    prog = Program()
    with program_guard(prog):
        data = layers.data(name="X", shape=[13, 11], dtype='float32')
        result = layers.sampling_id(data)
        self.assertIsNotNone(result)
def test_gaussian_random_batch_size_like(self):
    # Batch dimension (-1) should be inferred from the input variable.
    prog = Program()
    with program_guard(prog):
        data = layers.data(name="input", shape=[13, 11], dtype='float32')
        result = layers.gaussian_random_batch_size_like(
            data, shape=[-1, 11], mean=1.0, std=2.0)
        self.assertIsNotNone(result)
def test_sum(self):
    # sum should build against a single input variable and return a var.
    prog = Program()
    with program_guard(prog):
        data = layers.data(name="input", shape=[13, 11], dtype='float32')
        result = layers.sum(data)
        self.assertIsNotNone(result)
def test_slice(self):
    # BUGFIX: the source was diff-garbled — leftover lines from an older
    # uniform_random_batch_size_like test were interleaved with the new
    # slice test, leaving the method syntactically broken. Reconstructed
    # from the added lines of the diff.
    starts = [1, 0, 2]
    ends = [3, 3, 4]
    axes = [0, 1, 2]
    program = Program()
    with program_guard(program):
        input = layers.data(
            name="input", shape=[3, 4, 5, 6], dtype='float32')
        out = layers.slice(input, axes=axes, starts=starts, ends=ends)
        self.assertIsNotNone(out)
......
......@@ -190,6 +190,8 @@ def get_attrs(op_type):
def get_outvars(op_type, indent=1):
inputs = _get_inputs(op_type)
if len(inputs) == 0:
return ""
ret = ""
for t in _get_outputs(op_type):
ret += get_indent_space(
......@@ -244,11 +246,10 @@ def {op_type}({args}):
return code
print(get_op_py("uniform_random_batch_size_like"))
#print(get_op_py("uniform_random_batch_size_like"))
#print(get_op_py("gaussian_random"))
#print(get_op_py("sampling_id"))
#print(get_op_py("gaussian_random_batch_size_like"))
#print(get_op_py("sum"))
#print(get_op_py("slice"))
#print(get_op_py("shape"))
#get_meta("linear_chain_crf")
print(get_op_py("sampling_id"))
print(get_op_py("gaussian_random_batch_size_like"))
print(get_op_py("sum"))
print(get_op_py("slice"))
print(get_op_py("shape"))
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册