Unverified commit 42ea819e authored by JesseyXujin, committed by GitHub

fix API doc, solve conflict, test=develop, test=document_fix (#20196) (#20319)

* fix APIs,test=develop,test=document_fix

* fix conflict, test=develop, test=document_fix

* fix conflict, test=develop, test=document_fix

* fix conflict, test=develop, test=document_fix

* fix API.spec, test=develop, test=document_fix

* change fluid.layers.data to fluid.data,test=develop, test=document_fix

* fix bug on example code, test=develop, test=document_fix

* fix API.spec,  test=develop, test=document_fix
Parent 6ce87bf9
File mode changed from 100644 to 100755
@@ -132,7 +132,7 @@ paddle.fluid.layers.dynamic_lstm (ArgSpec(args=['input', 'size', 'h_0', 'c_0', '
 paddle.fluid.layers.dynamic_lstmp (ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name', 'h_0', 'c_0', 'cell_clip', 'proj_clip'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None, None, None, None, None)), ('document', 'c37d51aad655c8a9f9b045c64717320a'))
 paddle.fluid.layers.dynamic_gru (ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False)), ('document', '83617c165827e030636c80486d5de6f3'))
 paddle.fluid.layers.gru_unit (ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False)), ('document', '33974b9bfa69f2f1eb85e6f956dff04e'))
-paddle.fluid.layers.linear_chain_crf (ArgSpec(args=['input', 'label', 'param_attr', 'length'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'bc7a0fd2bb2b35dfd2f54947320e78fa'))
+paddle.fluid.layers.linear_chain_crf (ArgSpec(args=['input', 'label', 'param_attr', 'length'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'b28bdb43160e9667be2a3457d19d9f5b'))
 paddle.fluid.layers.crf_decoding (ArgSpec(args=['input', 'param_attr', 'label', 'length'], varargs=None, keywords=None, defaults=(None, None)), ('document', '933b7e268c4ffa3d5c3ef953a5ee9f0b'))
 paddle.fluid.layers.cos_sim (ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None), ('document', '07bb25484c98d529fbe67338422724af'))
 paddle.fluid.layers.cross_entropy (ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)), ('document', '789a141e97fd0b37241f630935936d08'))
@@ -144,7 +144,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCDHW')), ('document', 'a7e4573745c40b8b1d726709f209b6e4'))
 paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test', 'pad_value'], varargs=None, keywords=None, defaults=(False, 0.0)), ('document', 'e90a93251c52dc4e6fb34fb3991b3f82'))
 paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'eaa9d0bbd3d4e017c8bc4ecdac483711'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', 'cee673c79e3ff4582656a24e04f841e5'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '7ccaea1b93fe4f7387a6036692986c6b'))
 paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', '630cae697d46b4b575b15d56cf8be25a'))
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'db0035a3132b1dfb12e53c57591fb9f6'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '52343203de40afe29607397e13aaf0d2'))
@@ -191,7 +191,7 @@ paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_
 paddle.fluid.layers.group_norm (ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '65231cc8281815124934b1439fbb750c'))
 paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '9461e67095a6fc5d568fb2ce8fef66ff'))
 paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '54e1675aa0364f4a78fa72804ec0f413'))
-paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'ecb75c1b00c4c76c98b482f633b7a10c'))
+paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'cbe8940643ac80ef75e1abdfbdb09e88'))
 paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'ec4115591be842868c86b2e5334245c6'))
 paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '98e7927f09ee2270535b29f048e481ec'))
 paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'ca73fdc4551c5765c92eb00f24874289'))
@@ -229,7 +229,7 @@ paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'm
 paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
 paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
 paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ca34f88ff61cf2a7f4c97a493d6000d0'))
-paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.6666666666666666, 1.7159, None)), ('document', '1e1efad868714425da15c785dfb533a1'))
+paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d'))
 paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
 paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
 paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '1fadca6622c70bd33cc260817f4ff191'))
@@ -436,8 +436,8 @@ paddle.fluid.layers.retinanet_detection_output (ArgSpec(args=['bboxes', 'scores'
 paddle.fluid.layers.distribute_fpn_proposals (ArgSpec(args=['fpn_rois', 'min_level', 'max_level', 'refer_level', 'refer_scale', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'be432c9b5f19ccba7aca38789ead29e4'))
 paddle.fluid.layers.box_decoder_and_assign (ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'box_score', 'box_clip', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5203935538d06a6d47b8630ad80cb2b0'))
 paddle.fluid.layers.collect_fpn_proposals (ArgSpec(args=['multi_rois', 'multi_scores', 'min_level', 'max_level', 'post_nms_top_n', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '808fcca082e0040e2b77dbc53a0cf9d5'))
-paddle.fluid.layers.accuracy (ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)), ('document', 'ef799022a6040597462ae2b3d2f1c407'))
+paddle.fluid.layers.accuracy (ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)), ('document', 'b691b7be425e281bd36897b514b2b064'))
-paddle.fluid.layers.auc (ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)), ('document', '34b4575807f955f7e8698b8dead23858'))
+paddle.fluid.layers.auc (ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)), ('document', 'c36ac7125da977c2bd1b192bee301f75'))
 paddle.fluid.layers.exponential_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'eaf430c5a0380fb11bfe9a8922cd6295'))
 paddle.fluid.layers.natural_exp_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'aa3146f64d5d508e4e50687603aa7b15'))
 paddle.fluid.layers.inverse_time_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'ea37a3a8a0b3ce2254e7bc49a0951dbe'))
......
@@ -502,10 +502,12 @@ $out = x^{factor}$
 class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "Input of STanh operator");
-    AddOutput("Out", "Output of STanh operator");
-    AddAttr<float>("scale_a", "The scale parameter of a for the input")
-        .SetDefault(2.0f / 3.0f);
+    AddInput("X",
+             "Input of STanh operator."
+             " A LoDTensor or Tensor with type float32, float64.");
+    AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
+    AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
+        .SetDefault(0.67f);
     AddAttr<float>("scale_b", "The scale parameter of b for the input")
         .SetDefault(1.7159f);
     AddComment(R"DOC(
......
@@ -22,14 +22,14 @@ namespace operators {
 class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput(
-        "Emission",
-        "(LoDTensor/Tensor<float>). When a LoDTensor input, A 2-D LoDTensor"
-        " with shape [N x D], where N is the size of the "
-        "mini-batch and D is the total tag number. The unscaled emission "
-        "weight matrix for the linear chain CRF. When a Tensor input,"
-        "A Tensor with shape [N x S x D], where N is batch size,"
-        "S is max length of sequences, D is the total tag number.");
+    AddInput("Emission",
+             "(LoDTensor/Tensor<float>). When a LoDTensor input,A 2-D LoDTensor"
+             " with shape [N x D], where N is the size of the "
+             "mini-batch and D is the total tag number. The unscaled emission "
+             "weight matrix for the linear chain CRF. When a Tensor input,"
+             "A Tensor with shape [N x S x D], where N is batch number,"
+             "S is max length of sequences, D is the total tag number."
+             "A LoDTensor or Tensor with type float32, float64.");
     AddInput("Transition",
              "(Tensor, default Tensor<float>) A 2-D Tensor with shape "
              "[(D + 2) x D]. The learnable parameter for the linear_chain_crf "
@@ -38,10 +38,12 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
              "(LoDTensor/Tensor<int64_t>), when a LoDTensor input, "
              "[N x 1], where N is the total element number in a mini-batch. "
              "when a Tensor input, [N x S], where N is batch number. "
-             "S is max length of sequences. The ground truth.");
+             "S is max length of sequences. The ground truth."
+             "A LoDTensor or Tensor with int64.");
     AddInput("Length",
              "(Tensor, default Tensor<int64_t>) A Tensor with shape "
-             "[M x 1], where M is the sequence number in a mini-batch.")
+             "[M x 1], where M is the sequence number in a mini-batch."
+             "A Tensor with type int64.")
         .AsDispensable();
     AddOutput(
         "Alpha",
@@ -60,14 +62,16 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
         "(Tensor, default Tensor<float>), the same shape with Emission. "
         "The exponentials of Input(Emission). This is an intermediate "
         "computational result in forward computation, and will be reused in "
-        "backward computation.")
+        "backward computation."
+        "A LoDTensor or Tensor with type float32, float64.")
         .AsIntermediate();
     AddOutput(
         "TransitionExps",
         "(Tensor, default Tensor<float>) A 2-D Tensor with shape "
         "[(D + 2) x D]. The exponentials of Input(Transition). This is an "
        "intermediate computational result in forward computation, and "
-        "will be reused in backward computation.")
+        "will be reused in backward computation."
+        "A LoDTensor or Tensor with type float32, float64.")
         .AsIntermediate();
     AddOutput(
         "LogLikelihood",
@@ -75,7 +79,7 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
         "likelihood of each training sample in a mini-batch. This is a 2-D "
         "tensor with shape [S x 1], where S is the sequence number in a "
         "mini-batch. Note: S is equal to the sequence number in a mini-batch. "
-        "The output is no longer a LoDTensor.");
+        "A Tensor with type float32, float64.");
     AddComment(R"DOC(
 Conditional Random Field defines an undirected probabilistic graph with nodes
 denoting random variables and edges denoting dependencies between these
......
@@ -37,25 +37,38 @@ def accuracy(input, label, k=1, correct=None, total=None):
     Note: the dtype of accuracy is determined by input. the input and label dtype can be different.
     Args:
-        input(Variable): The input of accuracy layer, which is the predictions of network.
-            Carry LoD information is supported.
-        label(Variable): The label of dataset.
-        k(int): The top k predictions for each class will be checked.
-        correct(Variable): The correct predictions count.
-        total(Variable): The total entries count.
+        input(Variable): The input of accuracy layer, which is the predictions of network. A LoDTensor or Tensor with type float32,float64.
+        label(Variable): The label of dataset. LoDTensor or Tensor with type int32,int64.
+        k(int): The top k predictions for each class will be checked. Data type is int64 or int32.
+        correct(Variable): The correct predictions count. A Tensor with type int64 or int32.
+        total(Variable): The total entries count. A tensor with type int64 or int32.
     Returns:
-        Variable: The correct rate.
+        Variable: The correct rate. A Tensor with type float32.
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
-           data = fluid.layers.data(name="data", shape=[-1, 32, 32], dtype="float32")
-           label = fluid.layers.data(name="label", shape=[-1,1], dtype="int32")
-           predict = fluid.layers.fc(input=data, size=10)
-           accuracy_out = fluid.layers.accuracy(input=predict, label=label, k=5)
+           import numpy as np
+           data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
+           label = fluid.data(name="label", shape=[-1,1], dtype="int")
+           fc_out = fluid.layers.fc(input=data, size=10)
+           predict = fluid.layers.softmax(input=fc_out)
+           result = fluid.layers.accuracy(input=predict, label=label, k=5)
+           place = fluid.CPUPlace()
+           exe = fluid.Executor(place)
+           exe.run(fluid.default_startup_program())
+           x = np.random.rand(3, 32, 32).astype("float32")
+           y = np.array([[1],[0],[1]])
+           output= exe.run(feed={"input": x,"label": y},
+                           fetch_list=[result[0]])
+           print(output)
+           #[array([0.6666667], dtype=float32)]
     """
     helper = LayerHelper("accuracy", **locals())
     topk_out, topk_indices = nn.topk(input, k=k)
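The updated example reports plain top-k accuracy: a sample counts as correct when its ground-truth label appears among the k highest-scoring classes. A minimal standalone NumPy sketch of that quantity (the function and variable names below are illustrative, not part of the Paddle API):

import numpy as np

def topk_accuracy(predict, labels, k=1):
    # indices of the k highest-scoring classes per sample
    topk = np.argsort(predict, axis=1)[:, -k:]
    # a sample is correct if its label appears among its top-k classes
    hits = [labels[i] in topk[i] for i in range(len(labels))]
    return np.mean(hits)

scores = np.array([[0.1, 0.7, 0.2],
                   [0.6, 0.3, 0.1]])
labels = np.array([1, 2])
print(topk_accuracy(scores, labels, k=1))  # 0.5: only the first sample's label is ranked first
print(topk_accuracy(scores, labels, k=2))  # 0.5: label 2 is still outside sample 2's top-2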
@@ -105,8 +118,10 @@ def auc(input,
                         [0, 1]. Each row is sorted in descending order. This
                         input should be the output of topk. Typically, this
                         Variable indicates the probability of each label.
+                        A LoDTensor or Tensor with type float32,float64.
         label(Variable): A 2D int Variable indicating the label of the training
                          data. The height is batch size and width is always 1.
+                         A LoDTensor or Tensor with type int32,int64.
         curve(str): Curve type, can be 'ROC' or 'PR'. Default 'ROC'.
         num_thresholds(int): The number of thresholds to use when discretizing
                              the roc curve. Default 200.
@@ -118,15 +133,30 @@ def auc(input,
         Variable: A tuple representing the current AUC.
         The return tuple is auc_out, batch_auc_out, [
         batch_stat_pos, batch_stat_neg, stat_pos, stat_neg ]
+        Data type is Tensor, supporting float32, float64.
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
-           data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
-           label = fluid.layers.data(name="label", shape=[1], dtype="int32")
-           predict = fluid.layers.fc(input=data, size=2)
-           auc_out = fluid.layers.auc(input=predict, label=label)
+           import numpy as np
+           data = fluid.data(name="input", shape=[-1, 32,32], dtype="float32")
+           label = fluid.data(name="label", shape=[-1], dtype="int")
+           fc_out = fluid.layers.fc(input=data, size=2)
+           predict = fluid.layers.softmax(input=fc_out)
+           result=fluid.layers.auc(input=predict, label=label)
+           place = fluid.CPUPlace()
+           exe = fluid.Executor(place)
+           exe.run(fluid.default_startup_program())
+           x = np.random.rand(3,32,32).astype("float32")
+           y = np.array([1,0,1])
+           output= exe.run(feed={"input": x,"label": y},
+                           fetch_list=[result[0]])
+           print(output)
+           #[array([0.5])]
     """
     helper = LayerHelper("auc", **locals())
     auc_out = helper.create_variable_for_type_inference(dtype="float64")
......
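The AUC value returned above can be read as a ranking statistic: the probability that a randomly chosen positive sample is scored higher than a randomly chosen negative one. The op itself discretizes the curve using num_thresholds buckets and accumulates counts in the stat_pos/stat_neg outputs listed in the return tuple; the sketch below is only the exact pairwise definition in plain NumPy, with illustrative names, not the op's implementation:

import numpy as np

def pairwise_auc(scores, labels):
    pos = scores[labels == 1]
    neg = scores[labels == 0]
    # count positive/negative pairs where the positive is ranked higher; ties count half
    wins = (pos[:, None] > neg[None, :]).sum() + 0.5 * (pos[:, None] == neg[None, :]).sum()
    return wins / (len(pos) * len(neg))

scores = np.array([0.9, 0.4, 0.6, 0.2])
labels = np.array([1, 0, 1, 0])
print(pairwise_auc(scores, labels))  # 1.0: every positive outranks every negative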
@@ -1418,7 +1418,7 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
     ${comment}
     Args:
         input(${emission_type}): ${emission_comment}
         label(${label_type}): ${label_comment}
         Length(${length_type}): ${length_comment}
         param_attr(ParamAttr): The attribute of the learnable parameter for transition parameter.
@@ -1426,7 +1426,7 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
     Returns:
         output(${emission_exps_type}): ${emission_exps_comment} \n
         output(${transition_exps_type}): ${transition_exps_comment} \n
-        output(${log_likelihood_type}): ${log_likelihood_comment}
+        output(${log_likelihood_type}): ${log_likelihood_comment} \n
     Examples:
         .. code-block:: python
@@ -1438,8 +1438,8 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
-               input_data = fluid.layers.data(name='input_data', shape=[10], dtype='float32', lod_level=1)
-               label = fluid.layers.data(name='label', shape=[1], dtype='int', lod_level=1)
+               input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
+               label = fluid.data(name='label', shape=[-1,1], dtype='int')
                emission= fluid.layers.fc(input=input_data, size=10, act="tanh")
                crf_cost = fluid.layers.linear_chain_crf(
                    input=emission,
@@ -1462,9 +1462,9 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
-               input_data2 = fluid.layers.data(name='input_data2', shape=[10,10], dtype='float32')
-               label2 = fluid.layers.data(name='label2', shape=[10,1], dtype='int')
-               label_length = fluid.layers.data(name='length', shape=[1], dtype='int')
+               input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32')
+               label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int')
+               label_length = fluid.data(name='length', shape=[-1,1], dtype='int')
                emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2)
                crf_cost2 = fluid.layers.linear_chain_crf(
                    input=emission2,
@@ -1482,15 +1482,19 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
            #define data, using padding
            cc=np.random.rand(4,10,10).astype('float32')
            dd=np.random.rand(4,10,1).astype('int64')
-           ll=np.array([[3,3,4,2]])
+           ll=np.array([[3],[3],[4],[2]])
            feed2 = {'input_data2':cc,'label2':dd,'length':ll}
            loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2])
            print(loss2)
+           #[array([[ 7.8902354],
+           #        [ 7.3602567],
+           #        [ 10.004011],
+           #        [ 5.86721  ]], dtype=float32)]
            #you can use find_var to get transition parameter.
            transition=np.array(fluid.global_scope().find_var('crfw').get_tensor())
            print(transition)
     """
     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[2] if length else input.shape[1]
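The corrected feed ll=np.array([[3],[3],[4],[2]]) reflects that the Length input expects shape [M x 1] (one row per sequence) rather than [1 x M]. A small NumPy-only sketch of preparing the padded feeds used in that example, assuming four sequences of lengths 3, 3, 4 and 2 padded to a maximum length of 10 with 10 tags (array names here are illustrative):

import numpy as np

seq_lens = [3, 3, 4, 2]           # true lengths of the four padded sequences
max_len, tag_num = 10, 10

emission = np.random.rand(len(seq_lens), max_len, tag_num).astype('float32')            # [N x S x D]
labels = np.random.randint(0, tag_num, size=(len(seq_lens), max_len, 1)).astype('int64')  # [N x S x 1]
length = np.array(seq_lens, dtype='int64').reshape(-1, 1)   # [M x 1], same layout as ll above

print(length.shape)  # (4, 1)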
@@ -2250,7 +2254,8 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
         Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}
     Args:
-        input (Variable): The input variable.
+        input (Variable): The input variable. A LoDTensor or Tensor with type
+            float32, float64.
         use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
             library is installed. To improve numerical stablity, set use_cudnn to \
             False by default. Default: False
@@ -2258,23 +2263,28 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
             will be named automatically. Default: None.
         axis (int): The index of dimension to perform softmax calculations, it should
             be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
-            input variable. Default: -1.
+            input variable. Default: -1. -1 means the last dimension.
     Returns:
-        Variable: output of softmax
+        Variable: output of softmax. A Tensor with type float32, float64.
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
-           x = fluid.layers.data(name='x', shape=[2], dtype='float32')
-           fc = fluid.layers.fc(input=x, size=10)
-           # perform softmax in the second dimension
-           softmax = fluid.layers.softmax(input=fc, axis=1)
-           # perform softmax in the last dimension
-           softmax = fluid.layers.softmax(input=fc, axis=-1)
+           import numpy as np
+           data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
+           result = fluid.layers.softmax(data,axis=1)
+           place = fluid.CPUPlace()
+           exe = fluid.Executor(place)
+           exe.run(fluid.default_startup_program())
+           x = np.random.rand(3, 3).astype("float32")
+           output= exe.run(feed={"input": x},
+                           fetch_list=[result[0]])
+           print(output)
+           #array([0.22595254, 0.39276356, 0.38128382], dtype=float32)]
     """
     helper = LayerHelper('softmax', **locals())
     if not isinstance(input, Variable):
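The row-wise normalization in the docstring formula, Out[i, j] = exp(X[i, j]) / sum_j exp(X[i, j]), can be checked directly in NumPy. This sketch is independent of Paddle (names are illustrative) and subtracts the row maximum only for numerical stability, which leaves the result unchanged:

import numpy as np

def softmax_rows(x, axis=-1):
    # Out[i, j] = exp(X[i, j]) / sum_j exp(X[i, j]), computed stably
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.rand(3, 3).astype("float32")
out = softmax_rows(x, axis=1)
print(out.sum(axis=1))  # each row sums to 1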
@@ -7657,31 +7667,47 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
     Args:
         x (Variable): A tensor with rank at least 2. The input value of smooth
             L1 loss op with shape [batch_size, dim1, ..., dimN].
+            A LoDTensor or Tensor with type float32.
         y (Variable): A tensor with rank at least 2. The target value of smooth
             L1 loss op with same shape as :attr:`x`.
+            A LoDTensor or Tensor with type float32.
         inside_weight (Variable|None): A tensor with rank at least 2. This
             input is optional and should have same shape with :attr:`x`. If
             provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
             by this tensor element by element.
+            A Tensor with type float32.
         outside_weight (Variable|None): A tensor with rank at least 2. This
             input is optional and should have same shape with :attr:`x`. If
             provided, the out smooth L1 loss will be multiplied by this tensor
             element by element.
+            A Tensor with type float32.
         sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
             scalar with default value 1.0.
     Returns:
-        Variable: The output smooth L1 loss with shape [batch_size, 1].
+        Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
-           data = fluid.layers.data(name='data', shape=[128], dtype='float32')
-           label = fluid.layers.data(
-               name='label', shape=[100], dtype='float32')
-           fc = fluid.layers.fc(input=data, size=100)
-           out = fluid.layers.smooth_l1(x=fc, y=label)
+           import numpy as np
+           data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
+           label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
+           result = fluid.layers.smooth_l1(data,label)
+           place = fluid.CPUPlace()
+           exe = fluid.Executor(place)
+           exe.run(fluid.default_startup_program())
+           x = np.random.rand(3,3).astype("float32")
+           y = np.random.rand(3,3).astype("float32")
+           output= exe.run(feed={"x":x, "y":y},
+                           fetch_list=[result])
+           print(output)
+           #[array([[0.08220536],
+           #        [0.36652038],
+           #        [0.20541131]], dtype=float32)]
     """
     helper = LayerHelper('smooth_l1_loss', **locals())
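For reference, smooth L1 is the Huber-style loss popularized by Fast R-CNN. Assuming that definition (quadratic where |d| < 1/sigma^2, linear outside), with inside_weight applied to (x - y) and outside_weight applied to the loss as the docstring states, a standalone NumPy sketch producing the documented [batch_size, 1] shape could look like this (names are illustrative, not the op's actual implementation):

import numpy as np

def smooth_l1_np(x, y, sigma=1.0, inside_w=None, outside_w=None):
    sigma2 = sigma * sigma
    d = x - y
    if inside_w is not None:
        d = d * inside_w                      # applied to (x - y), per the docstring
    ad = np.abs(d)
    loss = np.where(ad < 1.0 / sigma2,
                    0.5 * sigma2 * d * d,     # quadratic region
                    ad - 0.5 / sigma2)        # linear region
    if outside_w is not None:
        loss = loss * outside_w               # applied to the loss, per the docstring
    return loss.reshape(loss.shape[0], -1).sum(axis=1, keepdims=True)  # [batch_size, 1]

x = np.random.rand(3, 3).astype("float32")
y = np.random.rand(3, 3).astype("float32")
print(smooth_l1_np(x, y).shape)  # (3, 1)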
@@ -10808,7 +10834,7 @@ def pow(x, factor=1.0, name=None):
 @templatedoc()
-def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None):
+def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
     """
     ${comment}
     Args:
@@ -10819,15 +10845,28 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None):
             will be named automatically.
     Returns:
-        output(${out_type}): ${out_comment}
+        output(${out_type}): ${out_comment}.
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
-           x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
-           y = fluid.layers.stanh(x, scale_a=0.67, scale_b=1.72)
+           import numpy as np
+           data = fluid.data(name="input", shape=[-1, 3])
+           result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
+           place = fluid.CPUPlace()
+           exe = fluid.Executor(place)
+           exe.run(fluid.default_startup_program())
+           x = np.random.random(size=(3, 3)).astype('float32')
+           output= exe.run(feed={"input": x},
+                           fetch_list=[result])
+           print(output)
+           #[array([[0.626466 , 0.89842904, 0.7501062 ],
+           #        [0.25147712, 0.7484996 , 0.22902708],
+           #        [0.62705994, 0.23110689, 0.56902856]], dtype=float32)]
     """
     helper = LayerHelper('stanh', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
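stanh is commonly documented as the scaled tanh out = scale_b * tanh(scale_a * x), which is consistent with this commit only changing the displayed default for scale_a from 2/3 to the nearly equal 0.67. A NumPy sketch under that assumption (the function name is illustrative):

import numpy as np

def stanh_np(x, scale_a=0.67, scale_b=1.7159):
    # scaled tanh: out = scale_b * tanh(scale_a * x)
    return scale_b * np.tanh(scale_a * x)

x = np.random.random(size=(3, 3)).astype('float32')
print(stanh_np(x, scale_a=0.67, scale_b=1.72))  # same shape as x, values in (-1.72, 1.72)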