From 736d078cbf07fc1fc610a90e2bedc7bc57398224 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 19 Jul 2017 22:30:34 +0800 Subject: [PATCH] replace Tensor::tensor to EigenTensor::From --- paddle/operators/mul_op.cc | 6 +++--- paddle/operators/mul_op.cu | 4 ++-- paddle/operators/mul_op.h | 11 +++++++---- paddle/operators/rowwise_add_op.cc | 4 ++-- paddle/operators/rowwise_add_op.cu | 4 ++-- paddle/operators/rowwise_add_op.h | 11 ++++++----- paddle/operators/sigmoid_op.cc | 4 ++-- paddle/operators/sigmoid_op.cu | 4 ++-- paddle/operators/sigmoid_op.h | 10 ++++++---- paddle/operators/softmax_op.cc | 4 ++-- paddle/operators/softmax_op.cu | 4 ++-- paddle/operators/softmax_op.h | 9 +++++---- 12 files changed, 41 insertions(+), 34 deletions(-) diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 7aa63961a..fa2247868 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -12,9 +12,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include <paddle/operators/mul_op.h> -#include <paddle/framework/op_registry.h> -#include <paddle/framework/tensor.h> +#include "paddle/operators/mul_op.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/tensor.h" namespace paddle { namespace operators { diff --git a/paddle/operators/mul_op.cu b/paddle/operators/mul_op.cu index 75f00e746..3ee581dc7 100644 --- a/paddle/operators/mul_op.cu +++ b/paddle/operators/mul_op.cu @@ -12,8 +12,8 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -#include <paddle/operators/mul_op.h> -#include <paddle/framework/op_registry.h> +#include "paddle/operators/mul_op.h" +#include "paddle/framework/op_registry.h" REGISTER_OP_GPU_KERNEL(mul, paddle::operators::MulKernel<paddle::platform::GPUPlace, float>); diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -14,8 +14,9 @@ #pragma once -#include <glog/logging.h> -#include <paddle/framework/operator.h> +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" namespace paddle { namespace operators { @@ -34,8 +35,10 @@ public: output->mutable_data<T>(context.GetPlace()); - output->matrix<T>().device(*(context.GetEigenDevice<Place>())) = - input0.matrix<T>().contract(input1.matrix<T>(), dim_pair); + framework::EigenMatrix<T>::From(*output).device( + *(context.GetEigenDevice<Place>())) = + framework::EigenMatrix<T>::From(input0).contract( + framework::EigenMatrix<T>::From(input1), dim_pair); } }; } // namespace operators diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index 567b058fd..2590dff7b 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -12,8 +12,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include <paddle/operators/rowwise_add_op.h> -#include <paddle/framework/op_registry.h> +#include "paddle/operators/rowwise_add_op.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/operators/rowwise_add_op.cu b/paddle/operators/rowwise_add_op.cu index 58fe96a4a..5dfac4fd2 100644 --- a/paddle/operators/rowwise_add_op.cu +++ b/paddle/operators/rowwise_add_op.cu @@ -1,5 +1,5 @@ -#include <paddle/framework/op_registry.h> -#include <paddle/operators/rowwise_add_op.h> +#include "paddle/framework/op_registry.h" +#include "paddle/operators/rowwise_add_op.h" REGISTER_OP_GPU_KERNEL( rowwise_add, paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace, float>); diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h index f1d43002d..ffe937840 100644 --- a/paddle/operators/rowwise_add_op.h +++ b/paddle/operators/rowwise_add_op.h @@ -13,8 +13,9 @@ limitations under the License.
*/ #pragma once -#include <glog/logging.h> -#include <paddle/framework/operator.h> +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" namespace paddle { namespace operators { @@ -27,9 +28,9 @@ public: auto in1 = context.Input(1)->Get<framework::Tensor>(); auto* out = context.Output(0)->GetMutable<framework::Tensor>(); - auto input = in0.matrix<T>(); - auto bias = in1.vec<T>(); - auto output = out->matrix<T>(); + auto input = framework::EigenMatrix<T>::From(in0); + auto bias = framework::EigenVector<T>::From(in1); + auto output = framework::EigenMatrix<T>::From(*out); const int bias_size = bias.dimension(0); const int rest_size = input.size() / bias_size; diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index fa13f2c4f..589b48ce8 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -12,8 +12,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include <paddle/operators/sigmoid_op.h> -#include <paddle/framework/op_registry.h> +#include "paddle/operators/sigmoid_op.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu index 59bba2729..ed344b2bf 100644 --- a/paddle/operators/sigmoid_op.cu +++ b/paddle/operators/sigmoid_op.cu @@ -1,5 +1,5 @@ -#include <paddle/operators/sigmoid_op.h> -#include <paddle/framework/op_registry.h> +#include "paddle/operators/sigmoid_op.h" +#include "paddle/framework/op_registry.h" REGISTER_OP_GPU_KERNEL( sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace, float>); diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h index 7995b7529..2b9356246 100644 --- a/paddle/operators/sigmoid_op.h +++ b/paddle/operators/sigmoid_op.h @@ -14,8 +14,9 @@ #pragma once -#include <glog/logging.h> -#include <paddle/framework/operator.h> +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" namespace paddle { namespace operators { @@ -29,8 +30,9 @@ public: output->mutable_data<T>(context.GetPlace()); - output->flat<T>().device(*(context.GetEigenDevice<Place>())) = - 1.0 / (1.0 + (-1.0 * input.flat<T>()).exp()); + 
framework::EigenVector<T>::Flatten(*output).device( + *(context.GetEigenDevice<Place>())) = + 1.0 / (1.0 + (-1.0 * framework::EigenVector<T>::Flatten(input)).exp()); } }; } // namespace operators diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 42795adbd..81bad7486 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -11,8 +11,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include <paddle/operators/softmax_op.h> -#include <paddle/framework/op_registry.h> +#include "paddle/operators/softmax_op.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/operators/softmax_op.cu b/paddle/operators/softmax_op.cu index 730c76a04..60676191e 100644 --- a/paddle/operators/softmax_op.cu +++ b/paddle/operators/softmax_op.cu @@ -1,5 +1,5 @@ -#include <paddle/framework/op_registry.h> -#include <paddle/operators/softmax_op.h> +#include "paddle/framework/op_registry.h" +#include "paddle/operators/softmax_op.h" REGISTER_OP_GPU_KERNEL( softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace, float>); diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 34a6c299b..53c626a79 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -14,8 +14,9 @@ #pragma once -#include <glog/logging.h> -#include <paddle/framework/operator.h> +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" namespace paddle { namespace operators { @@ -27,8 +28,8 @@ public: auto input = context.Input(0)->Get<framework::Tensor>(); auto* output = context.Output(0)->GetMutable<framework::Tensor>(); - auto logits = input.matrix<T>(); - auto softmax = output->matrix<T>(); + auto logits = framework::EigenMatrix<T>::From(input); + auto softmax = framework::EigenMatrix<T>::From(*output); const int kBatchDim = 0; const int kClassDim = 1; -- GitLab