From c5d7107767a1a42f46e7d0bf42ef26279fd562db Mon Sep 17 00:00:00 2001
From: peterzhang2029
Date: Mon, 13 Nov 2017 20:17:38 +0800
Subject: [PATCH] refine var name

---
 paddle/operators/bilinear_tensor_product_op.h | 44 +++++++++----------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/paddle/operators/bilinear_tensor_product_op.h b/paddle/operators/bilinear_tensor_product_op.h
index 984e7abdfb1..ffa4f43a327 100644
--- a/paddle/operators/bilinear_tensor_product_op.h
+++ b/paddle/operators/bilinear_tensor_product_op.h
@@ -43,25 +43,25 @@ class BilinearTensorProductKernel : public framework::OpKernel<T> {
 
     auto batch_size = x->dims()[0];
     auto weight_dims = weight->dims();
-    int Out_dim = weight_dims[0];
-    int X_dim = weight_dims[1];
-    int Y_dim = weight_dims[2];
+    int out_dim = weight_dims[0];
+    auto x_dim = weight_dims[1];
+    auto y_dim = weight_dims[2];
     auto place = ctx.GetEigenDevice<Place>();
 
     // Create the intermediate variable to caculate the result of
     // Input(X) multiplied by Input(Weight_i), the formula is:
     // left_mul = X Weight_i.
     Tensor left_mul;
-    left_mul.mutable_data<T>(framework::make_ddim({batch_size, Y_dim}),
+    left_mul.mutable_data<T>(framework::make_ddim({batch_size, y_dim}),
                              ctx.GetPlace());
     auto left_mul_mat = EigenMatrix<T>::From(left_mul);
 
-    for (int i = 0; i < Out_dim; ++i) {
+    for (int i = 0; i < out_dim; ++i) {
       auto output_col_vec = output_mat.chip(i, 1);
       Tensor weight_mat =
-          weight->Slice(i, i + 1).Resize(framework::make_ddim({X_dim, Y_dim}));
+          weight->Slice(i, i + 1).Resize(framework::make_ddim({x_dim, y_dim}));
       math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasNoTrans,
-                           batch_size, Y_dim, X_dim, 1, x->data<T>(),
+                           batch_size, y_dim, x_dim, 1, x->data<T>(),
                            weight_mat.data<T>(), 0, left_mul.data<T>());
       output_col_vec.device(place) =
           (left_mul_mat * y_mat).sum(Eigen::DSizes<int, 1>(1));
@@ -89,9 +89,9 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
 
     auto batch_size = x->dims()[0];
     auto weight_dims = weight->dims();
-    int Out_dim = weight_dims[0];
-    int X_dim = weight_dims[1];
-    int Y_dim = weight_dims[2];
+    int out_dim = weight_dims[0];
+    auto x_dim = weight_dims[1];
+    auto y_dim = weight_dims[2];
 
     auto x_mat = EigenMatrix<T>::From(*x);
     auto y_mat = EigenMatrix<T>::From(*y);
@@ -100,13 +100,13 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
 
     // Create the intermediate variable to caculate the Output(Y@Grad).
     Tensor x_scale;
-    x_scale.mutable_data<T>(framework::make_ddim({batch_size, X_dim}),
+    x_scale.mutable_data<T>(framework::make_ddim({batch_size, x_dim}),
                             ctx.GetPlace());
     auto x_scale_mat = EigenMatrix<T>::From(x_scale);
 
    // Create the intermediate variable to caculate the Output(X@Grad).
     Tensor y_scale;
-    y_scale.mutable_data<T>(framework::make_ddim({batch_size, Y_dim}),
+    y_scale.mutable_data<T>(framework::make_ddim({batch_size, y_dim}),
                             ctx.GetPlace());
     auto y_scale_mat = EigenMatrix<T>::From(y_scale);
 
@@ -126,11 +126,11 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
 
     // Caculate the Output(X@Grad) and Output(Y@Grad).
     if (d_x || d_y) {
-      Eigen::DSizes<int, 2> bcast_for_x(1, Y_dim);
-      Eigen::DSizes<int, 2> bcast_for_y(1, X_dim);
-      for (int i = 0; i < Out_dim; ++i) {
+      Eigen::DSizes<int, 2> bcast_for_x(1, y_dim);
+      Eigen::DSizes<int, 2> bcast_for_y(1, x_dim);
+      for (int i = 0; i < out_dim; ++i) {
         Tensor weight_i = weight->Slice(i, i + 1).Resize(
-            framework::make_ddim({X_dim, Y_dim}));
+            framework::make_ddim({x_dim, y_dim}));
         auto output_vec = d_out_mat.chip(i, 1);
         if (d_x) {
           y_scale_mat.device(place) =
@@ -138,7 +138,7 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
                   .broadcast(bcast_for_x) *
               y_mat;
           math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasTrans,
-                               batch_size, X_dim, Y_dim, 1, y_scale.data<T>(),
+                               batch_size, x_dim, y_dim, 1, y_scale.data<T>(),
                                weight_i.data<T>(), 1, d_x->data<T>());
         }
         if (d_y) {
@@ -147,7 +147,7 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
                   .broadcast(bcast_for_y) *
               x_mat;
           math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasNoTrans,
-                               batch_size, Y_dim, X_dim, 1, x_scale.data<T>(),
+                               batch_size, y_dim, x_dim, 1, x_scale.data<T>(),
                                weight_i.data<T>(), 1, d_y->data<T>());
         }
       }
@@ -156,17 +156,17 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
     // Caculate the gradient of Input(Weight).
     if (d_weight) {
       d_weight->mutable_data<T>(ctx.GetPlace());
-      Eigen::DSizes<int, 2> bcast_for_weight(1, X_dim);
-      for (int i = 0; i < Out_dim; ++i) {
+      Eigen::DSizes<int, 2> bcast_for_weight(1, x_dim);
+      for (int i = 0; i < out_dim; ++i) {
         Tensor d_weight_i = d_weight->Slice(i, i + 1).Resize(
-            framework::make_ddim({X_dim, Y_dim}));
+            framework::make_ddim({x_dim, y_dim}));
         auto output_vec = d_out_mat.chip(i, 1);
         x_scale_mat.device(place) =
             output_vec.reshape(Eigen::DSizes<int, 2>(batch_size, 1))
                 .broadcast(bcast_for_weight) *
             x_mat;
         math::gemm<Place, T>(ctx.device_context(), CblasTrans, CblasNoTrans,
-                             X_dim, Y_dim, batch_size, 1, x_scale.data<T>(),
+                             x_dim, y_dim, batch_size, 1, x_scale.data<T>(),
                              y->data<T>(), 0, d_weight_i.data<T>());
       }
     }
-- 
GitLab
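
For reviewers tracing the renamed dimensions, the kernel computes a bilinear tensor product: for each batch row b and output channel i, out[b][i] = x[b] * W_i * y[b]^T, where W_i is the i-th (x_dim x y_dim) slice of Weight. The patch realizes this as one GEMM per channel (left_mul = X * W_i) followed by an Eigen reduction against Y. Below is a minimal standalone sketch of that arithmetic with naive loops; it is illustrative only and not part of the patch, and the helper name BilinearForward plus the flat row-major std::vector layout are assumptions rather than PaddlePaddle APIs.

// Illustrative reference only (not part of the patch). Layouts are row-major
// flat vectors: x is [batch, x_dim], y is [batch, y_dim],
// w is [out_dim, x_dim, y_dim], out is [batch, out_dim].
#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the kernel's math: for each b and i,
// out[b][i] = sum_k ( sum_j x[b][j] * W_i[j][k] ) * y[b][k],
// i.e. left_mul = x[b] * W_i followed by a dot product with y[b].
void BilinearForward(const std::vector<float>& x, const std::vector<float>& y,
                     const std::vector<float>& w, std::vector<float>* out,
                     int batch, int x_dim, int y_dim, int out_dim) {
  for (int b = 0; b < batch; ++b) {
    for (int i = 0; i < out_dim; ++i) {
      float acc = 0.f;
      for (int k = 0; k < y_dim; ++k) {
        // left_mul[k]: column k of the per-channel product x[b] * W_i.
        float left_mul = 0.f;
        for (int j = 0; j < x_dim; ++j) {
          left_mul += x[b * x_dim + j] * w[(i * x_dim + j) * y_dim + k];
        }
        acc += left_mul * y[b * y_dim + k];
      }
      (*out)[b * out_dim + i] = acc;
    }
  }
}

int main() {
  // Smoke test: batch=1, x_dim=y_dim=2, out_dim=1, W_0 = identity,
  // so out[0][0] reduces to the dot product x . y = 1*3 + 2*4 = 11.
  std::vector<float> x = {1.f, 2.f};
  std::vector<float> y = {3.f, 4.f};
  std::vector<float> w = {1.f, 0.f, 0.f, 1.f};
  std::vector<float> out(1);
  BilinearForward(x, y, w, &out, 1, 2, 2, 1);
  std::printf("out = %f\n", out[0]);  // prints 11.000000
  return 0;
}

The gradient hunks above follow the same per-channel formula by the chain rule: y_scale[b] = d_out[b][i] * y[b] feeds d_x += y_scale * W_i^T (the CblasTrans GEMM with beta = 1), x_scale[b] = d_out[b][i] * x[b] feeds d_y += x_scale * W_i, and d_W_i = x_scale^T * y sums over the batch in a single GEMM with beta = 0.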