Unverified commit 642cf6ca, authored by Zeng Jinle and committed by GitHub

Merge pull request #13418 from sneaxiy/dam_save_memory

Modify some ops to save memory
@@ -124,7 +124,7 @@ paddle.fluid.layers.split ArgSpec(args=['input', 'num_or_sections', 'dim', 'name
 paddle.fluid.layers.ctc_greedy_decoder ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.edit_distance ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None))
 paddle.fluid.layers.l2_normalize ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None))
-paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'name'], varargs=None, keywords=None, defaults=(False, False, None))
+paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None))
 paddle.fluid.layers.topk ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.warpctc ArgSpec(args=['input', 'label', 'blank', 'norm_by_times'], varargs=None, keywords=None, defaults=(0, False))
 paddle.fluid.layers.sequence_reshape ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None)
......
@@ -129,6 +129,9 @@ class GradOpDescMakerBase {
   std::string ForwardOpType() const { return this->fwd_op_.Type(); }
+ protected:
+  const OpDesc& ForwardOp() const { return fwd_op_; }
  private:
   const OpDesc& fwd_op_;
   const std::unordered_set<std::string>& no_grad_set_;
......
@@ -13,9 +13,45 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/elementwise_mul_op.h"
+#include <string>
 #include "paddle/fluid/operators/elementwise_op.h"
+namespace paddle {
+namespace operators {
+class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    std::unique_ptr<framework::OpDesc> op(new framework::OpDesc());
+    op->SetType("elementwise_mul_grad");
+    op->SetInput("X", Input("X"));
+    op->SetInput("Y", Input("Y"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+    op->SetAttrMap(Attrs());
+    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    op->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
+    return op;
+  }
+};
+class ElementwiseMulOpMaker : public ElementwiseOpMaker {
+ protected:
+  virtual std::string GetName() const { return "Mul"; }
+  virtual std::string GetEquation() const { return "Out = X \\\\odot Y"; }
+};
+}  // namespace operators
+}  // namespace paddle
 namespace ops = paddle::operators;
-REGISTER_ELEMWISE_OP(elementwise_mul, "Mul", "Out = X \\\\odot Y");
+REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp,
+                  ops::ElementwiseMulOpMaker, ops::ElementwiseOpInferVarType,
+                  ops::ElementwiseMulOpGradDescMaker);
+REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUDeviceContext, float>,
......
@@ -93,8 +93,8 @@ class ElementwiseMulGradKernel : public ElemwiseGradKernel<T> {
     auto* x = ctx.Input<Tensor>("X");
     auto* y = ctx.Input<Tensor>("Y");
-    auto* out = ctx.Input<Tensor>("Out");
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* out = dout;  // out is not necessary
     auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
     int axis = ctx.Attr<int>("axis");
......
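This pair of changes is the memory saving for elementwise_mul: its backward needs only X, Y and the gradient of Out, because dX = dOut ⊙ Y and dY = dOut ⊙ X never read the forward output, so Out no longer has to be kept alive for the backward pass (the kernel simply aliases out to dout). A minimal NumPy sketch of that gradient rule, ignoring broadcasting along axis:

    import numpy as np

    x = np.random.rand(2, 3).astype(np.float32)
    y = np.random.rand(2, 3).astype(np.float32)
    dout = np.ones_like(x)  # upstream gradient w.r.t. Out

    # The backward of elementwise_mul uses only x, y and dout; Out is never read.
    dx = dout * y
    dy = dout * x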
@@ -59,7 +59,8 @@ class MatMulKernel : public framework::OpKernel<T> {
         RowMatrixFromVector(x.dims()), 0, context.Attr<bool>("transpose_X"));
     auto mat_dim_b = math::CreateMatrixDescriptor(
         ColumnMatrixFromVector(y.dims()), 0, context.Attr<bool>("transpose_Y"));
-    blas.MatMul(x, mat_dim_a, y, mat_dim_b, T(1), out, T(0));
+    auto scale = static_cast<T>(context.Attr<float>("alpha"));
+    blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, T(0));
   }
 };
@@ -185,7 +186,8 @@ class MatMulGradKernel : public framework::OpKernel<T> {
     auto blas = math::GetBlas<DeviceContext, T>(context);
     auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a);
     auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b);
-    blas.MatMul(a, mat_dim_a, b, mat_dim_b, T(1), out, T(0));
+    blas.MatMul(a, mat_dim_a, b, mat_dim_b,
+                static_cast<T>(context.Attr<float>("alpha")), out, T(0));
   }
   void CalcInputGrad(const framework::ExecutionContext &context,
@@ -334,6 +336,7 @@ class MatMulOpMaker : public framework::OpProtoAndCheckerMaker {
                   R"DOC(If true, use the transpose of `Y`.
 )DOC")
         .SetDefault(false);
+    AddAttr<float>("alpha", "The scale of Out").SetDefault(1.0f);
     AddComment(R"DOC(
 MatMul Operator.
......
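The new alpha attribute folds the output scaling into the GEMM call itself (beta stays 0), so the kernel computes Out = alpha * (X' @ Y') where X' and Y' are the optionally transposed inputs; no separate scale op or extra intermediate tensor is needed. A rough NumPy sketch of the semantics for the plain 2-D, non-transposed case:

    import numpy as np

    def matmul_with_alpha(x, y, alpha=1.0):
        # Equivalent to the kernel's blas.MatMul(..., scale, out, 0): Out = alpha * (X @ Y).
        return alpha * np.matmul(x, y)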
@@ -156,12 +156,29 @@ class MulGradOp : public framework::OperatorWithKernel {
   }
 };
+class MulOpGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    std::unique_ptr<framework::OpDesc> retv(new framework::OpDesc());
+    retv->SetType("mul_grad");
+    retv->SetInput("X", Input("X"));
+    retv->SetInput("Y", Input("Y"));
+    retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+    retv->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
+    retv->SetAttrMap(Attrs());
+    return retv;
+  }
+};
 }  // namespace operators
 }  // namespace paddle
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpGradMaker);
 REGISTER_OPERATOR(mul_grad, ops::MulGradOp);
 REGISTER_OP_CPU_KERNEL(
     mul, ops::MulKernel<paddle::platform::CPUDeviceContext, float>,
......
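mul gets the same treatment as elementwise_mul: the default grad op desc maker would have forwarded every forward input and output (including Out) to mul_grad, keeping Out alive through the backward pass, whereas the explicit MulOpGradMaker wires up only X, Y and the gradient of Out, which is all the math needs. As a NumPy sketch for the plain 2-D case (the real op first flattens its inputs to matrices according to x_num_col_dims / y_num_col_dims):

    import numpy as np

    x = np.random.rand(4, 5).astype(np.float32)
    y = np.random.rand(5, 3).astype(np.float32)
    dout = np.ones((4, 3), dtype=np.float32)  # gradient w.r.t. Out = X @ Y

    # mul_grad needs only X, Y and dOut; the forward output is never read.
    dx = dout @ y.T
    dy = x.T @ dout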
@@ -52,6 +52,12 @@ $$Out = scale*X$$
 )DOC");
     AddAttr<float>("scale", "The scaling factor of the scale operator.")
         .SetDefault(1.0);
+    AddAttr<float>("bias", "The bias of the scale operator.").SetDefault(0.0);
+    AddAttr<bool>(
+        "bias_after_scale",
+        "Apply bias addition after or before scaling. It is useful for "
+        "numeric stability in some circumstances.")
+        .SetDefault(true);
   }
 };
@@ -80,6 +86,8 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker {
     grad_op->SetInput("X", OutputGrad("Out"));
     grad_op->SetOutput("Out", InputGrad("X"));
     grad_op->SetAttr("scale", GetAttr("scale"));
+    grad_op->SetAttr("bias", 0.0f);
+    grad_op->SetAttr("bias_after_scale", true);
     return std::unique_ptr<framework::OpDesc>(grad_op);
   }
 };
......
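With the new attributes a single scale op can compute either scale * X + bias or scale * (X + bias), so an adjacent elementwise addition (and the temporary it would allocate) can be folded into the same op. A small NumPy sketch of the two orderings:

    import numpy as np

    def scale_op(x, scale=1.0, bias=0.0, bias_after_scale=True):
        # Mirrors the kernel below: bias is applied after or before the scaling.
        if bias_after_scale:
            return scale * x + bias
        return scale * (x + bias)

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(scale_op(x, scale=2.0, bias=1.0, bias_after_scale=True))   # [3. 5. 7.]
    print(scale_op(x, scale=2.0, bias=1.0, bias_after_scale=False))  # [4. 6. 8.]

The grad maker above pins bias to 0.0 and bias_after_scale to true, so the backward remains dX = scale * dOut regardless of how the forward applied the bias.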
@@ -34,6 +34,8 @@ class ScaleKernel : public framework::OpKernel<T> {
                       "in and out should have the same dim");
     auto scale = static_cast<T>(ctx.Attr<float>("scale"));
+    auto bias = static_cast<T>(ctx.Attr<float>("bias"));
+    auto bias_after_scale = ctx.Attr<bool>("bias_after_scale");
     if (in_var->IsType<framework::SelectedRows>() && in_var != out_var) {
       auto& in_slr = in_var->Get<framework::SelectedRows>();
@@ -45,7 +47,11 @@ class ScaleKernel : public framework::OpKernel<T> {
     auto eigen_out = framework::EigenVector<T>::Flatten(*out);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
     auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
-    eigen_out.device(dev) = scale * eigen_in;
+    if (bias_after_scale) {
+      eigen_out.device(dev) = scale * eigen_in + bias;
+    } else {
+      eigen_out.device(dev) = scale * (eigen_in + bias);
+    }
   }
 };
......
@@ -3499,7 +3499,7 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     return out
-def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
+def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     """
     Applies matrix multiplication to two tensors.
@@ -3533,6 +3533,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
         y (Variable): The input variable which is a Tensor or LoDTensor.
         transpose_x (bool): Whether to transpose :math:`x` before multiplication.
         transpose_y (bool): Whether to transpose :math:`y` before multiplication.
+        alpha (float): The scale of output. Default 1.0.
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
@@ -3600,8 +3601,11 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
         inputs={'X': x,
                 'Y': y},
         outputs={'Out': out},
-        attrs={'transpose_X': transpose_x,
-               'transpose_Y': transpose_y})
+        attrs={
+            'transpose_X': transpose_x,
+            'transpose_Y': transpose_y,
+            'alpha': alpha,
+        })
     return out
......
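On the Python side the scale is exposed as the new alpha argument of fluid.layers.matmul. A hedged usage sketch (the data-layer names and shapes here are illustrative only):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
    y = fluid.layers.data(name='y', shape=[3, 4], dtype='float32')

    # Out = 0.5 * (x @ y); the scaling happens inside the matmul kernel,
    # so no separate scale op (and no extra intermediate tensor) is created.
    out = fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=0.5)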