diff --git a/paddle/framework/eigen.h b/paddle/framework/eigen.h
index 0438e758e0b170e7e23243a42984507189b2a3e5..54bbeafcabdeeb1e2c1017c156b3512c83dada3a 100644
--- a/paddle/framework/eigen.h
+++ b/paddle/framework/eigen.h
@@ -87,11 +87,11 @@ template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
   // Flatten reshapes a Tensor into an EigenVector.
   static typename EigenVector::Type Flatten(Tensor& tensor) {
-    return EigenVector::From(tensor, {static_cast<int>(product(tensor.dims_))});
+    return EigenVector::From(tensor, {product(tensor.dims_)});
   }
 
   static typename EigenVector::ConstType Flatten(const Tensor& tensor) {
-    return EigenVector::From(tensor, {static_cast<int>(product(tensor.dims_))});
+    return EigenVector::From(tensor, {product(tensor.dims_)});
   }
 };
diff --git a/paddle/framework/eigen_test.cc b/paddle/framework/eigen_test.cc
index bae82fdb7d48c7029dd8c0d11c5bdca70b43da53..bc4a2db32cfba66bef2c444e1f822e0d2a57b91e 100644
--- a/paddle/framework/eigen_test.cc
+++ b/paddle/framework/eigen_test.cc
@@ -110,8 +110,7 @@ TEST(Eigen, Matrix) {
 
 TEST(Eigen, MatrixReshape) {
   Tensor t;
-  float* p =
-      t.mutable_data<float>(make_ddim({2, 3, 6, 4}), platform::CPUPlace());
+  float* p = t.mutable_data<float>({2, 3, 6, 4}, platform::CPUPlace());
   for (int i = 0; i < 2 * 3 * 6 * 4; ++i) {
     p[i] = static_cast<float>(i);
   }
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index a2c2d19dc73b72f4d595379ad534e235d6da8f50..55302ea47120f420e952b26830c8ea4cbcce6435 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -267,7 +267,7 @@ TEST(Tensor, ReshapeToMatrix) {
   using namespace paddle::framework;
   using namespace paddle::platform;
   Tensor src;
-  int* src_ptr = src.mutable_data<int>(make_ddim({2, 3, 4, 9}), CPUPlace());
+  int* src_ptr = src.mutable_data<int>({2, 3, 4, 9}, CPUPlace());
   for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
     src_ptr[i] = i;
   }
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index fb96d322e9e5040b1f12f9ceb58a5685d223bb0b..34595adedda745cb8e1137aaa99009a07bb29433 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -27,8 +27,8 @@ class MulOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
-    int x_num_col_dims = GetAttr<int>("x_num_col_dims");
-    int y_num_col_dims = GetAttr<int>("y_num_col_dims");
+    int x_num_col_dims = Attr<int>("x_num_col_dims");
+    int y_num_col_dims = Attr<int>("y_num_col_dims");
 
     PADDLE_ENFORCE(x_dims.size() > x_num_col_dims,
                    "The rank of input tensor X(%s) should be larger than "
@@ -58,19 +58,19 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "The output of mul op");
     AddAttr<int>(
         "x_num_col_dims",
-        "mul_op can take tensors with more than two dimensions as input `X`, "
-        "in that case, tensors will be reshaped to a matrix. The matrix's "
-        "first dimension(column length) will be the product of tensor's last "
-        "`num_col_dims` dimensions, and the matrix's second dimension(row "
-        "length) will be the product of tensor's first `rank - num_col_dims` "
-        "dimensions.")
+        R"DOC(mul_op can take tensors with more than two dimensions as input `X`,
+        in that case, tensors will be reshaped to a matrix. The matrix's first
+        dimension(column length) will be the product of tensor's last
+        `num_col_dims` dimensions, and the matrix's second dimension(row length)
+        will be the product of tensor's first `rank - num_col_dims` dimensions.
+        )DOC")
        .SetDefault(1)
        .EqualLargerThan(1);
     AddAttr<int>(
         "y_num_col_dims",
-        "mul_op can take tensors with more than two dimensions as input `Y`, "
-        "in that case, tensors will be reshaped to a matrix. Just like input "
-        "`X`.")
+        R"DOC(mul_op can take tensors with more than two dimensions as input `Y`,
+        in that case, tensors will be reshaped to a matrix. Just like input `X`.
+        )DOC")
        .SetDefault(1)
        .EqualLargerThan(1);
     AddComment(R"DOC(
@@ -98,9 +98,9 @@ class MulOpGrad : public framework::OperatorWithKernel {
     auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
 
     auto x_mat_dims =
-        framework::flatten_to_2d(x_dims, GetAttr<int>("x_num_col_dims"));
+        framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
     auto y_mat_dims =
-        framework::flatten_to_2d(y_dims, GetAttr<int>("y_num_col_dims"));
+        framework::flatten_to_2d(y_dims, Attr<int>("y_num_col_dims"));
 
     PADDLE_ENFORCE_EQ(
         x_mat_dims[0], out_dims[0],
diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h
index 6656ecaf1aff05082071f1a5956072f1a70cf63f..3c01f868bda8cba488b3403df456d63d6b082fa6 100644
--- a/paddle/operators/mul_op.h
+++ b/paddle/operators/mul_op.h
@@ -37,12 +37,12 @@ class MulKernel : public framework::OpKernel {
     const Tensor x_matrix =
         x->dims().size() > 2
             ? framework::ReshapeToMatrix(
-                  *x, context.template GetAttr<int>("x_num_col_dims"))
+                  *x, context.template Attr<int>("x_num_col_dims"))
             : *x;
     const Tensor y_matrix =
         y->dims().size() > 2
             ? framework::ReshapeToMatrix(
-                  *y, context.template GetAttr<int>("y_num_col_dims"))
+                  *y, context.template Attr<int>("y_num_col_dims"))
             : *y;
 
     z->mutable_data<T>(context.GetPlace());
@@ -57,8 +57,8 @@ template <typename Place, typename T>
 class MulGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    int x_num_col_dims = ctx.template GetAttr<int>("x_num_col_dims");
-    int y_num_col_dims = ctx.template GetAttr<int>("y_num_col_dims");
+    int x_num_col_dims = ctx.template Attr<int>("x_num_col_dims");
+    int y_num_col_dims = ctx.template Attr<int>("y_num_col_dims");
     const Tensor* x = ctx.Input<Tensor>("X");
     const Tensor* y = ctx.Input<Tensor>("Y");
     const Tensor x_matrix =
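
For context, here is a minimal standalone sketch of the flatten-to-2D reshaping that the `x_num_col_dims` / `y_num_col_dims` attributes describe. It assumes the split collapses the product of the first `num_col_dims` dimensions into the first matrix dimension and the product of the remaining `rank - num_col_dims` dimensions into the second; `Flatten2D`, the `std::vector`-based shape, and the `main` driver are illustrative stand-ins, not the `framework::flatten_to_2d` / `framework::ReshapeToMatrix` helpers the patch calls.

// Illustration only: Flatten2D is a hypothetical helper mirroring the
// reshaping described by the x_num_col_dims / y_num_col_dims attributes,
// not PaddlePaddle's framework::flatten_to_2d API.
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

// Collapse a rank-N shape into a 2-D shape: the first num_col_dims
// dimensions are multiplied into dim0, the remaining rank - num_col_dims
// dimensions into dim1 (assumed split direction).
std::pair<int64_t, int64_t> Flatten2D(const std::vector<int64_t>& dims,
                                      int num_col_dims) {
  assert(num_col_dims >= 1 &&
         static_cast<size_t>(num_col_dims) < dims.size());
  int64_t dim0 = std::accumulate(dims.begin(), dims.begin() + num_col_dims,
                                 int64_t{1}, std::multiplies<int64_t>());
  int64_t dim1 = std::accumulate(dims.begin() + num_col_dims, dims.end(),
                                 int64_t{1}, std::multiplies<int64_t>());
  return {dim0, dim1};
}

int main() {
  // The {2, 3, 6, 4} shape used in eigen_test.cc: with num_col_dims = 2 it
  // flattens to a 6 x 24 matrix; with the attribute's default of 1 it
  // flattens to 2 x 72.
  auto m2 = Flatten2D({2, 3, 6, 4}, 2);
  assert(m2.first == 6 && m2.second == 24);
  auto m1 = Flatten2D({2, 3, 6, 4}, 1);
  assert(m1.first == 2 && m1.second == 72);
  return 0;
}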