diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index d3583cf894991624f537a4073f14aacc470aadd0..cf0ac058f11c08b47ca396dbdac6a34a7514b16b 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -170,6 +170,13 @@ paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_
 paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
 paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
 paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
+paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
+paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
+paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32'))
+paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
+paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -241,13 +248,6 @@ paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwarg
 paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.gaussian_random ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sampling_id ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.shape ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
diff --git a/paddle/fluid/operators/sampling_id_op.cc b/paddle/fluid/operators/sampling_id_op.cc
index 724463c95c4a29fb5c00fe791b389d3908771640..a4f41a170426a4650fd3bf8f7fec4758ff34e1b9 100644
--- a/paddle/fluid/operators/sampling_id_op.cc
+++ b/paddle/fluid/operators/sampling_id_op.cc
@@ -53,15 +53,16 @@ class SamplingIdOpMaker : public framework::OpProtoAndCheckerMaker {
 SamplingId Operator.
       A layer for sampling id from multinomial distribution from the
       input. Sampling one id for one sample.)DOC");
-    AddAttr<float>("min", "Minimum value of random. [default 0.0].")
+    AddAttr<float>("min", "Minimum value of random. (float, default 0.0).")
         .SetDefault(0.0f);
-    AddAttr<float>("max", "Maximun value of random. [default 1.0].")
+    AddAttr<float>("max", "Maximum value of random. (float, default 1.0).")
         .SetDefault(1.0f);
-    AddAttr<int>("seed",
-                 "Random seed used for the random number engine. "
-                 "0 means use a seed generated by the system."
-                 "Note that if seed is not 0, this operator will always "
-                 "generate the same random numbers every time. [default 0].")
+    AddAttr<int>(
+        "seed",
+        "Random seed used for the random number engine. "
+        "0 means use a seed generated by the system. "
+        "Note that if seed is not 0, this operator will always "
+        "generate the same random numbers every time. (int, default 0).")
         .SetDefault(0);
   }
 };
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index 574d0d727cba9fa9de0cffbe116f71b9e65a7092..9772c65738a2c5373f657164e3bc379404ba642e 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -284,7 +284,7 @@ def detection_output(loc,
         target_box=loc,
         code_type='decode_center_size')
     compile_shape = scores.shape
-    run_shape = ops.shape(scores)
+    run_shape = nn.shape(scores)
     scores = nn.flatten(x=scores, axis=2)
     scores = nn.softmax(input=scores)
     scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)
@@ -697,7 +697,7 @@ def ssd_loss(location,
         raise ValueError("Only support mining_type == max_negative now.")

     num, num_prior, num_class = confidence.shape
-    conf_shape = ops.shape(confidence)
+    conf_shape = nn.shape(confidence)

     def __reshape_to_2d(var):
         return nn.flatten(x=var, axis=2)
@@ -724,7 +724,7 @@ def ssd_loss(location,
     target_label.stop_gradient = True
     conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
     # 3. Mining hard examples
-    actual_shape = ops.slice(conf_shape, axes=[0], starts=[0], ends=[2])
+    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
     actual_shape.stop_gradient = True
     conf_loss = nn.reshape(
         x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
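For reviewers skimming the operator change above: SamplingId treats each row of its input as a multinomial distribution and draws one index per row by inverting the row-wise CDF against a uniform draw. The sketch below is a minimal NumPy reference of that idea, not the operator's actual kernel; `sampling_id_ref` is a hypothetical name, and details such as tie-breaking and clipping may differ from the C++ implementation.

```python
import numpy as np

def sampling_id_ref(probs, seed=0):
    # One uniform draw per row, then invert the row-wise CDF: the sampled
    # id is the first index whose cumulative probability reaches the draw.
    rng = np.random.RandomState(seed)
    u = rng.uniform(0.0, 1.0, size=probs.shape[0])
    cdf = np.cumsum(probs, axis=1)
    return (cdf < u[:, None]).sum(axis=1)

probs = np.full((4, 5), 0.2, dtype=np.float32)  # four rows, uniform over 5 ids
print(sampling_id_ref(probs))                   # one sampled id per row
```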
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 2cb61a9cd25c744710ab7ac9ea591902740f78da..a9696ac20060d1069a99a02a79a755a740e760f0 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -29,110 +29,29 @@ from .. import unique_name
 from functools import reduce

 __all__ = [
-    'fc',
-    'embedding',
-    'dynamic_lstm',
-    'dynamic_lstmp',
-    'dynamic_gru',
-    'gru_unit',
-    'linear_chain_crf',
-    'crf_decoding',
-    'cos_sim',
-    'cross_entropy',
-    'square_error_cost',
-    'chunk_eval',
-    'sequence_conv',
-    'conv2d',
-    'conv3d',
-    'sequence_pool',
-    'sequence_softmax',
-    'softmax',
-    'pool2d',
-    'pool3d',
-    'batch_norm',
-    'beam_search_decode',
-    'conv2d_transpose',
-    'conv3d_transpose',
-    'sequence_expand',
-    'sequence_expand_as',
-    'sequence_pad',
-    'lstm_unit',
-    'reduce_sum',
-    'reduce_mean',
-    'reduce_max',
-    'reduce_min',
-    'reduce_prod',
-    'sequence_first_step',
-    'sequence_last_step',
-    'dropout',
-    'split',
-    'ctc_greedy_decoder',
-    'edit_distance',
-    'l2_normalize',
-    'matmul',
-    'topk',
-    'warpctc',
-    'sequence_reshape',
-    'transpose',
-    'im2sequence',
-    'nce',
-    'hsigmoid',
-    'beam_search',
-    'row_conv',
-    'multiplex',
-    'layer_norm',
-    'softmax_with_cross_entropy',
-    'smooth_l1',
-    'one_hot',
-    'autoincreased_step_counter',
-    'reshape',
-    'squeeze',
-    'unsqueeze',
-    'lod_reset',
-    'lrn',
-    'pad',
-    'pad_constant_like',
-    'label_smooth',
-    'roi_pool',
-    'dice_loss',
-    'image_resize',
-    'image_resize_short',
-    'resize_bilinear',
-    'gather',
-    'scatter',
-    'sequence_scatter',
-    'random_crop',
-    'mean_iou',
-    'relu',
-    'log',
-    'crop',
-    'rank_loss',
-    'elu',
-    'relu6',
-    'pow',
-    'stanh',
-    'hard_sigmoid',
-    'swish',
-    'prelu',
-    'brelu',
-    'leaky_relu',
-    'soft_relu',
-    'flatten',
-    'sequence_mask',
-    'stack',
-    'pad2d',
-    'unstack',
-    'sequence_enumerate',
-    'expand',
-    'sequence_concat',
-    'scale',
-    'elementwise_add',
-    'elementwise_div',
-    'elementwise_sub',
-    'elementwise_mul',
-    'elementwise_max',
-    'elementwise_min',
-    'elementwise_pow',
+    'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru',
+    'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy',
+    'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', 'conv3d',
+    'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'pool3d',
+    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose',
+    'sequence_expand', 'sequence_expand_as', 'sequence_pad', 'lstm_unit',
+    'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod',
+    'sequence_first_step', 'sequence_last_step', 'dropout', 'split',
+    'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', 'matmul', 'topk',
+    'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', 'nce',
+    'hsigmoid', 'beam_search', 'row_conv', 'multiplex', 'layer_norm',
+    'softmax_with_cross_entropy', 'smooth_l1', 'one_hot',
+    'autoincreased_step_counter', 'reshape', 'squeeze', 'unsqueeze',
+    'lod_reset', 'lrn', 'pad', 'pad_constant_like', 'label_smooth', 'roi_pool',
+    'dice_loss', 'image_resize', 'image_resize_short', 'resize_bilinear',
+    'gather', 'scatter', 'sequence_scatter', 'random_crop', 'mean_iou', 'relu',
+    'log', 'crop', 'rank_loss', 'elu', 'relu6', 'pow', 'stanh', 'hard_sigmoid',
+    'swish', 'prelu', 'brelu', 'leaky_relu', 'soft_relu', 'flatten',
+    'sequence_mask', 'stack', 'pad2d', 'unstack', 'sequence_enumerate',
+    'expand', 'sequence_concat', 'scale', 'elementwise_add', 'elementwise_div',
+    'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min',
+    'elementwise_pow', 'uniform_random_batch_size_like', 'gaussian_random',
+    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
 ]


@@ -6463,6 +6382,246 @@ def expand(x, expand_times, name=None):
     return out


+from paddle.fluid.framework import convert_np_dtype_to_dtype_
+
+
+@templatedoc()
+def uniform_random_batch_size_like(input,
+                                   shape,
+                                   dtype='float32',
+                                   input_dim_idx=0,
+                                   output_dim_idx=0,
+                                   min=-1.0,
+                                   max=1.0,
+                                   seed=0):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+        shape (tuple|list): ${shape_comment}
+        input_dim_idx (Int): ${input_dim_idx_comment}
+        output_dim_idx (Int): ${output_dim_idx_comment}
+        min (Float): ${min_comment}
+        max (Float): ${max_comment}
+        seed (Int): ${seed_comment}
+        dtype (np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
+    Returns:
+        out (Variable): ${out_comment}
+
+    """
+
+    helper = LayerHelper('uniform_random_batch_size_like', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='uniform_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'min': min,
+            'max': max,
+            'seed': seed,
+            'dtype': c_dtype
+        })
+
+    return out
+
+
+@templatedoc()
+def gaussian_random(shape,
+                    mean=0.0,
+                    std=1.0,
+                    seed=0,
+                    dtype='float32',
+                    use_mkldnn=False):
+    """
+    ${comment}
+
+    Args:
+        shape (tuple|list): ${shape_comment}
+        mean (Float): ${mean_comment}
+        std (Float): ${std_comment}
+        seed (Int): ${seed_comment}
+        dtype (np.dtype|core.VarDesc.VarType|str): Output data type.
+        use_mkldnn (Bool): Only used in mkldnn kernel.
+
+    Returns:
+        out (Variable): ${out_comment}
+
+    """
+
+    helper = LayerHelper('gaussian_random', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='gaussian_random',
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': c_dtype,
+            'use_mkldnn': use_mkldnn
+        })
+
+    return out
+
+
+@templatedoc()
+def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
+    """
+    ${comment}
+
+    Args:
+        x (Variable): ${x_comment}
+        min (Float): ${min_comment}
+        max (Float): ${max_comment}
+        seed (Int): ${seed_comment}
+        dtype (np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
+
+    Returns:
+        out (Variable): ${out_comment}
+
+    """
+
+    helper = LayerHelper('sampling_id', **locals())
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type='sampling_id',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'min': min,
+               'max': max,
+               'seed': seed})
+
+    return out
+
+
+@templatedoc()
+def gaussian_random_batch_size_like(input,
+                                    shape,
+                                    input_dim_idx=0,
+                                    output_dim_idx=0,
+                                    mean=0.0,
+                                    std=1.0,
+                                    seed=0,
+                                    dtype='float32'):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+        shape (tuple|list): ${shape_comment}
+        input_dim_idx (Int): ${input_dim_idx_comment}
+        output_dim_idx (Int): ${output_dim_idx_comment}
+        mean (Float): ${mean_comment}
+        std (Float): ${std_comment}
+        seed (Int): ${seed_comment}
+        dtype (np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
+
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
+    out = helper.create_tmp_variable(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='gaussian_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': c_dtype
+        })
+
+    return out
+
+
+@templatedoc()
+def sum(x, use_mkldnn=False):
+    """
+    ${comment}
+
+    Args:
+        x (Variable): ${x_comment}
+        use_mkldnn (Bool): ${use_mkldnn_comment}
+
+    Returns:
+        out (Variable): ${out_comment}
+    """
+
+    helper = LayerHelper('sum', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
+    helper.append_op(
+        type='sum',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'use_mkldnn': use_mkldnn})
+
+    return out
+
+
+@templatedoc()
+def slice(input, axes, starts, ends):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+        axes (List): ${axes_comment}
+        starts (List): ${starts_comment}
+        ends (List): ${ends_comment}
+
+    Returns:
+        out (Variable): ${out_comment}
+
+    """
+
+    helper = LayerHelper('slice', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    helper.append_op(
+        type='slice',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={'axes': axes,
+               'starts': starts,
+               'ends': ends})
+
+    return out
+
+
+@templatedoc()
+def shape(input):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): ${input_comment}
+
+    Returns:
+        out (Variable): ${out_comment}
+
+    """
+
+    helper = LayerHelper('shape', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    helper.append_op(
+        type='shape', inputs={'Input': input}, outputs={'Out': out})
+
+    return out
+
+
 def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
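With the wrappers above in place, the moved layers are called like any other fluid layer. The snippet below is a minimal usage sketch written against this branch (variable names are illustrative, and it assumes a CPU build): `uniform_random_batch_size_like` copies the runtime batch dimension from its input, while `gaussian_random` draws a fixed-shape tensor that can feed `sampling_id` after a softmax.

```python
import numpy as np
import paddle.fluid as fluid

main, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(main, startup):
    feats = fluid.layers.data(name="feats", shape=[11], dtype="float32")
    # First dim copied from feats' runtime batch size (input_dim_idx=0).
    noise = fluid.layers.uniform_random_batch_size_like(
        input=feats, shape=[-1, 11], min=-1.0, max=1.0)
    # Fixed-shape Gaussian logits -> row-wise distribution -> one id per row.
    logits = fluid.layers.gaussian_random(shape=[4, 10], mean=0.0, std=1.0, seed=1)
    ids = fluid.layers.sampling_id(fluid.layers.softmax(logits), seed=1)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)
noise_np, ids_np = exe.run(
    main,
    feed={"feats": np.zeros((7, 11), dtype="float32")},
    fetch_list=[noise, ids])
print(noise_np.shape)  # (7, 11): batch dim follows the feed
print(ids_np)          # one sampled id per row of the softmax output
```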
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 7867bfe00e25711643eab1ab8d0141dbbad3da52..8a533035b0a3d175073cb0b0884aa507bcff782c 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -45,13 +45,6 @@ __all__ = [
     'logical_or',
     'logical_xor',
     'logical_not',
-    'uniform_random_batch_size_like',
-    'gaussian_random',
-    'sampling_id',
-    'gaussian_random_batch_size_like',
-    'sum',
-    'slice',
-    'shape',
     'maxout',
 ]
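The detection.py edits earlier in this patch now route through these `nn` wrappers. The sketch below shows the same `shape`/`slice` pattern in isolation, mirroring how `ssd_loss` trims `conf_shape` to its leading two dims; it is a hedged example against this branch's API (the integer dtype of `shape`'s output may vary by version).

```python
import numpy as np
import paddle.fluid as fluid

main, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(
        name="x", shape=[3, 4, 5, 6], dtype="float32", append_batch_size=False)
    whole = fluid.layers.shape(x)  # 1-D integer tensor holding [3, 4, 5, 6]
    # Keep only the leading two dims, as ssd_loss does with conf_shape.
    lead = fluid.layers.slice(whole, axes=[0], starts=[0], ends=[2])
    # Slicing the data itself: dims become (3-1, 3-0, 4-2, 6) = (2, 3, 2, 6).
    window = fluid.layers.slice(x, axes=[0, 1, 2], starts=[1, 0, 2], ends=[3, 3, 4])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)
whole_np, lead_np, window_np = exe.run(
    main,
    feed={"x": np.random.rand(3, 4, 5, 6).astype("float32")},
    fetch_list=[whole, lead, window])
print(whole_np, lead_np, window_np.shape)  # [3 4 5 6] [3 4] (2, 3, 2, 6)
```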
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index f474cdae2054531d44724e0e3e0e58a35fb8ddcd..b8dc9e8ad7cd7cd100d5c3cb99319e6f5a37da91 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -541,7 +541,7 @@ class TestBook(unittest.TestCase):
         with program_guard(program):
             input = layers.data(
                 name="input", shape=[3, 100, 100], dtype="float32")
-            out = layers.shape(input, name="shape")
+            out = layers.shape(input)
             self.assertIsNotNone(out)
             print(str(program))

@@ -758,6 +758,65 @@ class TestBook(unittest.TestCase):
             out = layers.expand(x, [1, 2])
             print(str(program))

+    def test_uniform_random_batch_size_like(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+            out = layers.uniform_random_batch_size_like(input, [-1, 11])
+            self.assertIsNotNone(out)
+            print(str(program))
+
+    def test_gaussian_random(self):
+        program = Program()
+        with program_guard(program):
+            out = layers.gaussian_random(shape=[20, 30])
+            self.assertIsNotNone(out)
+            print(str(program))
+
+    def test_sampling_id(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(
+                name="X",
+                shape=[13, 11],
+                dtype='float32',
+                append_batch_size=False)
+
+            out = layers.sampling_id(x)
+            self.assertIsNotNone(out)
+            print(str(program))
+
+    def test_gaussian_random_batch_size_like(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+
+            out = layers.gaussian_random_batch_size_like(
+                input, shape=[-1, 11], mean=1.0, std=2.0)
+            self.assertIsNotNone(out)
+            print(str(program))
+
+    def test_sum(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+
+            out = layers.sum(input)
+            self.assertIsNotNone(out)
+            print(str(program))
+
+    def test_slice(self):
+        starts = [1, 0, 2]
+        ends = [3, 3, 4]
+        axes = [0, 1, 2]
+
+        program = Program()
+        with program_guard(program):
+            input = layers.data(
+                name="input", shape=[3, 4, 5, 6], dtype='float32')
+
+            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
+
     def test_softshrink(self):
         program = Program()
         with program_guard(program):
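To exercise only the new cases without running all of `TestBook`, something like the following works from `python/paddle/fluid/tests/unittests` (a convenience sketch; in CI the suite is normally driven by CMake/ctest):

```python
import unittest
from test_layers import TestBook  # run from python/paddle/fluid/tests/unittests

# Build a suite containing just the tests added in this patch.
suite = unittest.TestSuite()
for name in ("test_uniform_random_batch_size_like", "test_gaussian_random",
             "test_sampling_id", "test_gaussian_random_batch_size_like",
             "test_sum", "test_slice"):
    suite.addTest(TestBook(name))
unittest.TextTestRunner(verbosity=2).run(suite)
```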