From 0718113a9c40cc0dab39b399016b6cc7c0d9dbc3 Mon Sep 17 00:00:00 2001
From: sneaxiy
Date: Tue, 18 Sep 2018 02:29:12 +0000
Subject: [PATCH] modification

---
 paddle/fluid/API.spec                       |  2 +-
 paddle/fluid/operators/elementwise_mul_op.h |  1 -
 paddle/fluid/operators/matmul_op.cc         | 10 ++++------
 paddle/fluid/operators/scale_op.cc          |  6 ++++++
 paddle/fluid/operators/scale_op.h           |  8 ++++++--
 python/paddle/fluid/layers/nn.py            | 14 +++-----------
 6 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 211186a65f..ed2739232e 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -130,7 +130,7 @@ paddle.fluid.layers.split ArgSpec(args=['input', 'num_or_sections', 'dim', 'name
 paddle.fluid.layers.ctc_greedy_decoder ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.edit_distance ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None))
 paddle.fluid.layers.l2_normalize ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None))
-paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'scale', 'bias', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, 0.0, None))
+paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None))
 paddle.fluid.layers.topk ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.warpctc ArgSpec(args=['input', 'label', 'blank', 'norm_by_times'], varargs=None, keywords=None, defaults=(0, False))
 paddle.fluid.layers.sequence_reshape ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None)
diff --git a/paddle/fluid/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise_mul_op.h
index 2148ed2591..b870d08a1a 100644
--- a/paddle/fluid/operators/elementwise_mul_op.h
+++ b/paddle/fluid/operators/elementwise_mul_op.h
@@ -93,7 +93,6 @@ class ElementwiseMulGradKernel : public ElemwiseGradKernel<T> {
 
     auto* x = ctx.Input<Tensor>("X");
     auto* y = ctx.Input<Tensor>("Y");
-    // auto* out = ctx.Input<Tensor>("Out");
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
     auto* out = dout;  // out is not necessary
     auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc
index 75645598f8..242a1b9ae9 100644
--- a/paddle/fluid/operators/matmul_op.cc
+++ b/paddle/fluid/operators/matmul_op.cc
@@ -59,9 +59,8 @@ class MatMulKernel : public framework::OpKernel<T> {
         RowMatrixFromVector(x.dims()), 0, context.Attr<bool>("transpose_X"));
     auto mat_dim_b = math::CreateMatrixDescriptor(
         ColumnMatrixFromVector(y.dims()), 0, context.Attr<bool>("transpose_Y"));
-    auto scale = static_cast<T>(context.Attr<float>("scale"));
-    auto bias = static_cast<T>(context.Attr<float>("bias"));
-    blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, bias);
+    auto scale = static_cast<T>(context.Attr<float>("alpha"));
+    blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, T(0));
   }
 };
 
@@ -188,7 +187,7 @@ class MatMulGradKernel : public framework::OpKernel<T> {
     auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a);
     auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b);
     blas.MatMul(a, mat_dim_a, b, mat_dim_b,
-                static_cast<T>(context.Attr<float>("scale")), out, T(0));
+                static_cast<T>(context.Attr<float>("alpha")), out, T(0));
   }
 
   void CalcInputGrad(const framework::ExecutionContext &context,
@@ -337,8 +336,7 @@ class MatMulOpMaker : public framework::OpProtoAndCheckerMaker {
               R"DOC(If true, use the transpose of `Y`.
         )DOC")
         .SetDefault(false);
-    AddAttr<float>("scale", "Scale").SetDefault(1.0f);
-    AddAttr<float>("bias", "Bias").SetDefault(0.0f);
+    AddAttr<float>("alpha", "The scale of Out").SetDefault(1.0f);
     AddComment(R"DOC(
 MatMul Operator.
 
diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc
index 87642b9481..13be6c65be 100644
--- a/paddle/fluid/operators/scale_op.cc
+++ b/paddle/fluid/operators/scale_op.cc
@@ -53,6 +53,11 @@ $$Out = scale*X$$
     AddAttr<float>("scale", "The scaling factor of the scale operator.")
         .SetDefault(1.0);
     AddAttr<float>("bias", "The bias of the scale operator.").SetDefault(0.0);
+    AddAttr<bool>(
+        "bias_after_scale",
+        "Apply bias addition after or before scaling. It is useful for "
+        "numeric stability in some circumstances.")
+        .SetDefault(true);
   }
 };
 
@@ -82,6 +87,7 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker {
     grad_op->SetOutput("Out", InputGrad("X"));
     grad_op->SetAttr("scale", GetAttr("scale"));
     grad_op->SetAttr("bias", 0.0f);
+    grad_op->SetAttr("bias_after_scale", true);
     return std::unique_ptr<framework::OpDesc>(grad_op);
   }
 };
diff --git a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h
index 7b659153ae..d8a199bc2b 100644
--- a/paddle/fluid/operators/scale_op.h
+++ b/paddle/fluid/operators/scale_op.h
@@ -35,6 +35,7 @@ class ScaleKernel : public framework::OpKernel<T> {
 
     auto scale = static_cast<T>(ctx.Attr<float>("scale"));
     auto bias = static_cast<T>(ctx.Attr<float>("bias"));
+    auto bias_after_scale = ctx.Attr<bool>("bias_after_scale");
 
     if (in_var->IsType<framework::SelectedRows>() && in_var != out_var) {
       auto& in_slr = in_var->Get<framework::SelectedRows>();
@@ -46,8 +47,11 @@ class ScaleKernel : public framework::OpKernel<T> {
     auto eigen_out = framework::EigenVector<T>::Flatten(*out);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
     auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
-    eigen_out.device(dev) =
-        static_cast<T>(scale) * eigen_in + static_cast<T>(bias);
+    if (bias_after_scale) {
+      eigen_out.device(dev) = scale * eigen_in + bias;
+    } else {
+      eigen_out.device(dev) = scale * (eigen_in + bias);
+    }
   }
 };
 
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 8c0899a815..35f2876f32 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3388,13 +3388,7 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     return out
 
 
-def matmul(x,
-           y,
-           transpose_x=False,
-           transpose_y=False,
-           scale=1.0,
-           bias=0.0,
-           name=None):
+def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     """
     Applies matrix multiplication to two tensors.
 
@@ -3428,8 +3422,7 @@ def matmul(x,
         y (Variable): The input variable which is a Tensor or LoDTensor.
         transpose_x (bool): Whether to transpose :math:`x` before multiplication.
         transpose_y (bool): Whether to transpose :math:`y` before multiplication.
-        scale (float): The scale of output. Default 1.0.
-        bias (float): The bias added to output. Default 0.0.
+        alpha (float): The scale of output. Default 1.0.
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
 
@@ -3500,8 +3493,7 @@ def matmul(x,
         attrs={
             'transpose_X': transpose_x,
             'transpose_Y': transpose_y,
-            'scale': scale,
-            'bias': bias
+            'alpha': alpha,
         })
     return out
--
GitLab
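Note for reviewers: the sketch below is not part of the patch; it is a minimal NumPy emulation of the semantics this change settles on, with hypothetical helper names (scale_reference, matmul_reference). matmul now takes a single output scale `alpha` with the GEMM beta fixed to zero, and the scale op gains `bias_after_scale` to choose whether `bias` is added before or after scaling.

import numpy as np

def scale_reference(x, scale=1.0, bias=0.0, bias_after_scale=True):
    """Emulates the scale op with the new bias_after_scale attribute."""
    if bias_after_scale:
        return scale * x + bias   # default: Out = scale * X + bias
    return scale * (x + bias)     # Out = scale * (X + bias)

def matmul_reference(x, y, transpose_x=False, transpose_y=False, alpha=1.0):
    """Emulates the new matmul attributes for 2-D inputs only; the real
    op transposes just the last two dims of batched tensors."""
    if transpose_x:
        x = x.T
    if transpose_y:
        y = y.T
    return alpha * np.matmul(x, y)  # old scale/bias pair replaced by alpha

x = np.random.rand(2, 3).astype('float32')
y = np.random.rand(3, 4).astype('float32')
assert matmul_reference(x, y, alpha=2.0).shape == (2, 4)
assert np.allclose(scale_reference(x, 2.0, 1.0, bias_after_scale=False),
                   2.0 * (x + 1.0))

Moving the bias out of matmul keeps the GEMM call a pure alpha * A * B, and the `bias_after_scale=False` form, per the attribute comment in scale_op.cc, can help numeric stability in some circumstances.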