Commit ac663353 authored by dzhwinter, committed by GitHub

Merge pull request #3379 from dzhwinter/mul_op

"mul op gradient"
@@ -25,8 +25,8 @@ void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
                                      const float alpha, const float* A,
                                      const float* B, const float beta, float* C,
                                      platform::DeviceContext* context) {
-  int lda = K;
-  int ldb = N;
+  int lda = (transA == CblasNoTrans) ? K : M;
+  int ldb = (transB == CblasNoTrans) ? N : K;
   int ldc = N;
   cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
               beta, C, ldc);
@@ -40,8 +40,8 @@ void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
                                       const double* B, const double beta,
                                       double* C,
                                       platform::DeviceContext* context) {
-  int lda = K;
-  int ldb = N;
+  int lda = (transA == CblasNoTrans) ? K : M;
+  int ldb = (transB == CblasNoTrans) ? N : K;
   int ldc = N;
   cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
               beta, C, ldc);
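This leading-dimension fix is the substantive change in the PR: for row-major CBLAS, lda and ldb are the column counts of A and B as they sit in memory, not dimensions of op(A) or op(B), so once a caller passes CblasTrans the old unconditional lda = K, ldb = N reads the buffer with the wrong stride. A minimal standalone example of the transposed case (plain CBLAS, independent of the PaddlePaddle wrappers):

```cpp
#include <cblas.h>
#include <cstdio>

int main() {
  // A is stored as a 3x2 row-major buffer and used with CblasTrans, so the
  // effective operand op(A) = A^T is 2x3: M = 2, K = 3. The leading
  // dimension is the stored column count, lda = 2 = M -- exactly the
  // (transA == CblasNoTrans) ? K : M rule introduced above.
  const float A[6] = {1, 2, 3, 4, 5, 6};  // stored 3x2
  const float B[6] = {1, 0, 0, 1, 1, 1};  // stored 3x2, used untransposed
  float C[4] = {0};                       // 2x2 result
  const int M = 2, N = 2, K = 3;
  cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans, M, N, K, 1.0f,
              A, /*lda=*/M, B, /*ldb=*/N, 0.0f, C, /*ldc=*/N);
  printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);  // expect 6 8 / 8 10
  return 0;
}
```

Built against any CBLAS implementation (e.g. `-lopenblas`), this prints C = A^T * B = [[6, 8], [8, 10]]; with the old lda = K it would stride through A incorrectly and produce garbage.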
...
@@ -18,6 +18,8 @@
 namespace paddle {
 namespace operators {
 
+using framework::Tensor;
+
 class MulOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -59,10 +61,23 @@ class MulOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(const framework::InferShapeContext &ctx) const override {}
-  std::string DebugString() const override {
-    LOG(INFO) << "MulGrad";
-    return "";
+  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
+                            "Input(Out@GRAD) should not be null");
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto y_dims = ctx.Input<Tensor>("Y")->dims();
+    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
+    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
+    PADDLE_ENFORCE(x_dims[0] == out_dims[0],
+                   "Out@GRAD M X N must equal to X dims 0, M ");
+    PADDLE_ENFORCE(y_dims[1] == out_dims[1],
+                   "Out@GRAD M X N must equal to Y dims 1, N ");
+    x_grad->Resize(x_dims);
+    y_grad->Resize(y_dims);
   }
 };
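The two shape checks and the Resize calls encode the standard matmul gradient algebra. For reference (this is textbook chain rule, not anything specific to this PR): with X in R^{M x K}, Y in R^{K x N}, Out = XY in R^{M x N}, and G the incoming gradient dL/dOut,

```latex
\frac{\partial L}{\partial X} = G\,Y^{\top} \in \mathbb{R}^{M \times K},
\qquad
\frac{\partial L}{\partial Y} = X^{\top} G \in \mathbb{R}^{K \times N}
```

so Out@GRAD must match X in dimension 0 (M) and Y in dimension 1 (N), and each gradient tensor is simply resized to the shape of its forward input.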
@@ -72,3 +87,5 @@ class MulOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
 REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(mul_grad,
+                       ops::MulGradKernel<paddle::platform::CPUPlace, float>);
@@ -17,3 +17,5 @@
 namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(mul_grad,
+                       ops::MulGradKernel<paddle::platform::GPUPlace, float>);
@@ -31,18 +31,34 @@ template <typename Place, typename T>
 class MulKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = {
-        {Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}};
-    auto* input0 = context.Input<Tensor>("X");
-    auto* input1 = context.Input<Tensor>("Y");
-    auto* output = context.Output<Tensor>("Out");
-    output->mutable_data<T>(context.GetPlace());
-    auto X = EigenMatrix<T>::From(*input0);
-    auto Y = EigenMatrix<T>::From(*input1);
-    auto Z = EigenMatrix<T>::From(*output);
-    auto& place = context.GetEigenDevice<Place>();
-
-    Z.device(place) = X.contract(Y, dim_pair);
+    auto* X = context.Input<Tensor>("X");
+    auto* Y = context.Input<Tensor>("Y");
+    auto* Z = context.Output<Tensor>("Out");
+    Z->mutable_data<T>(context.GetPlace());
+    auto* device_context =
+        const_cast<platform::DeviceContext*>(context.device_context_);
+    math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context);
+  }
+};
+
+template <typename Place, typename T>
+class MulGradKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* X = ctx.Input<Tensor>("X");
+    auto* Y = ctx.Input<Tensor>("Y");
+    auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* dY = ctx.Output<Tensor>(framework::GradVarName("Y"));
+    dX->mutable_data<T>(ctx.GetPlace());
+    dY->mutable_data<T>(ctx.GetPlace());
+    auto* device_context =
+        const_cast<platform::DeviceContext*>(ctx.device_context_);
+    // dX = dOut * Y'. dX: M x K, dOut : M x N, Y : K x N
+    math::matmul<Place, T>(*dOut, false, *Y, true, 1, dX, 0, device_context);
+    // dY = X' * dOut. dY: K x N, dOut : M x N, X : M x K
+    math::matmul<Place, T>(*X, true, *dOut, false, 1, dY, 0, device_context);
   }
 };
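The backward kernel is the first caller to pass true for the trans flags, which is why the gemm leading-dimension fix above is needed. The math::matmul wrapper itself is not shown in this diff; the following is a hypothetical, self-contained sketch (illustrative name and signature, not the real PaddlePaddle API) of how such a wrapper derives M, N, K and the leading dimensions before delegating to row-major CBLAS:

```cpp
#include <cassert>
#include <cblas.h>

// Hypothetical wrapper: A is stored a_rows x a_cols and B is stored
// b_rows x b_cols, both row-major; trans_a / trans_b select op(A) = A or A^T.
void matmul_sketch(const float* A, int a_rows, int a_cols, bool trans_a,
                   const float* B, int b_rows, int b_cols, bool trans_b,
                   float alpha, float* C, float beta) {
  // Effective shapes: op(A) is M x K, op(B) is K x N, C is M x N.
  const int M = trans_a ? a_cols : a_rows;
  const int K = trans_a ? a_rows : a_cols;
  const int N = trans_b ? b_rows : b_cols;
  assert((trans_b ? b_cols : b_rows) == K && "inner dimensions must match");
  // Row-major leading dimension = column count of the buffer as stored,
  // which reduces to the (transA == CblasNoTrans) ? K : M rule in the patch.
  const int lda = a_cols;
  const int ldb = b_cols;
  cblas_sgemm(CblasRowMajor, trans_a ? CblasTrans : CblasNoTrans,
              trans_b ? CblasTrans : CblasNoTrans, M, N, K, alpha, A, lda,
              B, ldb, beta, C, /*ldc=*/N);
}
```

Under that rule, math::matmul<Place, T>(*dOut, false, *Y, true, 1, dX, 0, ...) computes dX = dOut * Y' directly from the row-major buffers, without ever materializing a transposed copy of Y.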
...
 import unittest
-from op_test_util import OpTestMeta
 import numpy as np
+from gradient_checker import GradientChecker, create_op
+from op_test_util import OpTestMeta
 
 
 class TestMulOp(unittest.TestCase):
@@ -15,5 +16,19 @@ class TestMulOp(unittest.TestCase):
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
 
+class MulGradOpTest(GradientChecker):
+    def test_mul(self):
+        op = create_op("mul")
+        inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'Y': np.random.random((84, 100)).astype("float32")
+        }
+        # mul op will enlarge the relative error
+        self.check_grad(
+            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)
+
+
+# TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library
 if __name__ == '__main__':
     unittest.main()
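A note on max_relative_error=0.5: gradient checkers of this kind typically compare the analytic gradient produced by mul_grad against a numerical estimate, usually a finite difference such as the central difference below (stated for reference; the exact scheme inside gradient_checker is not shown in this diff). Chained float32 matmuls over 32 x 84 and 84 x 100 inputs accumulate enough rounding error that the tolerance has to be loose, which is what the "mul op will enlarge the relative error" comment refers to.

```latex
\frac{\partial f}{\partial x_i} \;\approx\; \frac{f(x + h\,e_i) - f(x - h\,e_i)}{2h},
\qquad e_i = i\text{-th unit vector},\quad h \ll 1
```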