diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 1c3192b0193b8ba2452e959c983b23df458a652a..243e74c9a3d6a14677e494aa610f3432c7f16832 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -92,7 +92,7 @@ paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'poo
 paddle.fluid.layers.adaptive_pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
 paddle.fluid.layers.adaptive_pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
 paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False))
-paddle.fluid.layers.data_norm ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'use_mkldnn', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, False, None, None, None, False))
+paddle.fluid.layers.data_norm ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, None, None, None, False))
 paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
 paddle.fluid.layers.conv3d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index d5bc25d19cba4de6f059612e3e8c4a65b2edd0f9..45bce6e5203f8c1dbb744e0f954f7f0a71c53372 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -140,9 +140,6 @@ class DataNormOpMaker : public framework::OpProtoAndCheckerMaker {
               "Scales of the history data batch, "
               "will apply to output when training")
         .AsIntermediate();
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false);
     AddComment(R"DOC(
 Data Normalization.
 
diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc
index c8ee13875c5ae772de3c09f97fded8f70c5698e6..640644a94690d9682a5e6b1aa788a9ebdc5d2a54 100644
--- a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc
+++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc
@@ -117,11 +117,11 @@ class TeacherStudentSigmoidLossOpMaker
               "[N x 1]. The teacher student sigmoid loss.");
     AddAttr<float>(
         "soft_max_up_bound",
-        "fp32, if input > soft_max_up_bound, will be bound, default 15.0")
+        "fp32, if input > soft_max_up_bound, input will be bound, default 15.0")
         .SetDefault(15.0);
-    AddAttr<float>(
-        "soft_max_lower_bound",
-        "fp32, if input < soft_max_lower_bound, will be bound, default -15.0")
+    AddAttr<float>("soft_max_lower_bound",
+                   "fp32, if input < soft_max_lower_bound, input will be "
+                   "bound, default -15.0")
         .SetDefault(-15.0);
     AddComment(R"DOC(
 TeacherStudentSigmoidLoss Operator.
@@ -134,7 +134,7 @@ we add another label(z') to original.
         label = {-2, -1, [0, 2]}
         when z' is not exist, clk = 0 : label = -2;
         when z' is not exist, clk = 1 : label = -1;
-        when z' is exist    , clk = 0 : label = 0 + z'; 
+        when z' is exist    , clk = 0 : label = 0 + z';
         when z' is exist    , clk = 1 : label = 1 + z';
 
 )DOC");
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 250dc24bd8f028b22d04b3f8dde082c7e236e402..4a8488b68c6bc41d5ea6404d1519c2d7de3ebffd 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3041,7 +3041,6 @@ def data_norm(input,
               param_attr=None,
               data_layout='NCHW',
               in_place=False,
-              use_mkldnn=False,
               name=None,
               moving_mean_name=None,
               moving_variance_name=None,
@@ -3075,7 +3074,6 @@ def data_norm(input,
         param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
         data_layout(string, default NCHW): NCHW|NHWC
         in_place(bool, Default False): Make the input and output of batch norm reuse memory.
-        use_mkldnn(bool, Default false): ${use_mkldnn_comment}
         name(string, Default None): A name for this layer(optional). If set None, the layer
             will be named automatically.
         moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
@@ -3156,8 +3154,7 @@ def data_norm(input,
         outputs={"Y": data_norm_out,
                  "Means": means,
                  "Scales": scales},
-        attrs={"epsilon": epsilon,
-               "use_mkldnn": use_mkldnn})
+        attrs={"epsilon": epsilon})
 
     return helper.append_activation(data_norm_out)
 
@@ -9945,6 +9942,7 @@ def teacher_student_sigmoid_loss(input,
     Examples:
         .. code-block:: python
 
+          cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)
     """
     helper = LayerHelper('teacher_student_sigmoid_loss', **locals())
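
Note (not part of the patch): a minimal sketch of calling data_norm after this change,
assuming a fluid build that includes it. The variable names x and hidden are illustrative
only; the point is that use_mkldnn is no longer an accepted keyword argument.

    import paddle.fluid as fluid

    # 2-D input; data_norm normalizes each feature with running statistics.
    x = fluid.layers.data(name='x', shape=[32], dtype='float32')

    # After this patch, use_mkldnn is gone from the Python signature, so
    # passing use_mkldnn=... now raises a TypeError instead of being
    # forwarded to the op as an attribute.
    hidden = fluid.layers.data_norm(input=x, epsilon=1e-05, data_layout='NCHW')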
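Note (not part of the patch): an expanded version of the one-line docstring example the
patch adds for teacher_student_sigmoid_loss. The fc layer producing similarity and the
float32 label feed are illustrative assumptions, not code from this PR.

    import paddle.fluid as fluid

    # label is a float tensor following the op's DOC comment: -2 / -1 when
    # the teacher value z' is absent (clk = 0 / 1), and 0 + z' / 1 + z'
    # when it is present.
    emb = fluid.layers.data(name='emb', shape=[128], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='float32')

    # similarity is the model logit, shape [N x 1] as required by the op.
    similarity = fluid.layers.fc(input=emb, size=1)
    cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)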