diff --git a/paddle/fluid/lite/operators/CMakeLists.txt b/paddle/fluid/lite/operators/CMakeLists.txt
index cfb40eebdb1b7c5fd0dfc806c0de7ebbd8083207..fe690973b60bd555c413cf86a76933215b03aa9f 100644
--- a/paddle/fluid/lite/operators/CMakeLists.txt
+++ b/paddle/fluid/lite/operators/CMakeLists.txt
@@ -12,7 +12,7 @@ cc_library(activation_ops_lite SRCS activation_ops.cc DEPS ${op_DEPS})
 cc_library(elementwise_ops_lite SRCS elementwise_ops.cc DEPS ${op_DEPS})
 cc_library(mean_op_lite SRCS mean_op.cc DEPS ${op_DEPS})
 cc_library(fill_constant_op_lite SRCS fill_constant_op.cc DEPS ${op_DEPS})
-cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
+#cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
 cc_library(op_params_lite SRCS op_params.cc DEPS ${tensor_lite} any_lite framework_proto_lite)

 set(ops_lite
diff --git a/paddle/fluid/lite/operators/activation_ops.cc b/paddle/fluid/lite/operators/activation_ops.cc
index d53bb0c9e31615339e1fc6f53ec4934922daca18..4b99c4d9e06d5c7e4f706ba009ccae9848900d69 100644
--- a/paddle/fluid/lite/operators/activation_ops.cc
+++ b/paddle/fluid/lite/operators/activation_ops.cc
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"

@@ -48,6 +50,7 @@ class ActivationOp : public OpLite {
   mutable ActivationParam param_;
 };

+#ifdef LITE_WITH_X86
 class ActivationGradOp : public OpLite {
  public:
   explicit ActivationGradOp(const std::string& type) : OpLite(type) {}
@@ -79,10 +82,13 @@ class ActivationGradOp : public OpLite {
  private:
   mutable ActivationGradParam param_;
 };
+#endif

 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle

 REGISTER_LITE_OP(square, paddle::lite::operators::ActivationOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(square_grad, paddle::lite::operators::ActivationGradOp);
+#endif
diff --git a/paddle/fluid/lite/operators/elementwise_ops.cc b/paddle/fluid/lite/operators/elementwise_ops.cc
index 044e621a1dfa95181609cbadc7d07c326d0a47b9..9e1267ad01f4ffaf6a848f63f27050fae32b7893 100644
--- a/paddle/fluid/lite/operators/elementwise_ops.cc
+++ b/paddle/fluid/lite/operators/elementwise_ops.cc
@@ -37,7 +37,6 @@ class ElementwiseOp : public OpLite {
   }

   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Y_name = opdesc.Input("Y").front();
     auto Out_name = opdesc.Output("Out").front();
@@ -45,7 +44,7 @@
     param_.X = GetVar<lite::Tensor>(scope, X_name);
     param_.Y = GetVar<lite::Tensor>(scope, Y_name);
     param_.Out = GetMutableVar<lite::Tensor>(scope, Out_name);
-    param_.axis = boost::get<int>(opdesc.GetAttr("axis"));
+    param_.axis = GetAttr<int>(opdesc.GetAttr("axis"));
     return true;
   }

@@ -58,6 +57,7 @@
   mutable operators::ElementwiseParam param_;
 };

+#ifdef LITE_WITH_X86
 class ElementwiseGradExplicitOp : public OpLite {
  public:
   explicit ElementwiseGradExplicitOp(const std::string& type) : OpLite(type) {}
@@ -99,11 +99,14 @@ class ElementwiseGradExplicitOp : public OpLite {
  private:
   mutable operators::ElementwiseGradParam param_;
 };
+#endif

 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle

 REGISTER_LITE_OP(elementwise_sub, paddle::lite::operators::ElementwiseOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(elementwise_sub_grad,
                  paddle::lite::operators::ElementwiseGradExplicitOp);
+#endif
diff --git a/paddle/fluid/lite/operators/fill_constant_op.cc b/paddle/fluid/lite/operators/fill_constant_op.cc
index 7671318fb3e5ff1a398b03b643647631bff44217..f701dd48775fa331c50ade5a3dda425c9551c1a0 100644
--- a/paddle/fluid/lite/operators/fill_constant_op.cc
+++ b/paddle/fluid/lite/operators/fill_constant_op.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"

@@ -35,14 +34,13 @@ class FillConstantOp : public OpLite {
   }

   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto Out_name = opdesc.Output("Out").front();

     param_.Out = GetMutableVar<lite::Tensor>(scope, Out_name);
-    param_.dtype = boost::get<int>(opdesc.GetAttr("dtype"));
-    param_.shape = boost::get<std::vector<int64_t>>(opdesc.GetAttr("shape"));
-    param_.value = boost::get<float>(opdesc.GetAttr("value"));
-    param_.force_cpu = boost::get<bool>(opdesc.GetAttr("force_cpu"));
+    param_.dtype = GetAttr<int>(opdesc.GetAttr("dtype"));
+    param_.shape = GetAttr<std::vector<int64_t>>(opdesc.GetAttr("shape"));
+    param_.value = GetAttr<float>(opdesc.GetAttr("value"));
+    param_.force_cpu = GetAttr<bool>(opdesc.GetAttr("force_cpu"));
     return true;
   }

diff --git a/paddle/fluid/lite/operators/mean_op.cc b/paddle/fluid/lite/operators/mean_op.cc
index 89798ca2e5f12d530a67aea4f10617facbf30994..20e3709872fc51b5c889a4a3958da45213f9c35f 100644
--- a/paddle/fluid/lite/operators/mean_op.cc
+++ b/paddle/fluid/lite/operators/mean_op.cc
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"

@@ -36,7 +38,6 @@ class MeanOp : public OpLite {
   }

   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Out_name = opdesc.Output("Out").front();

@@ -53,6 +54,7 @@ class MeanOp : public OpLite {
   mutable operators::ElementwiseParam param_;
 };

+#ifdef LITE_WITH_X86
 class MeanGradOp : public OpLite {
  public:
   explicit MeanGradOp(const std::string& type) : OpLite(type) {}
@@ -89,10 +91,13 @@ class MeanGradOp : public OpLite {
  private:
   mutable operators::MeanGradParam param_;
 };
+#endif

 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle

 REGISTER_LITE_OP(mean, paddle::lite::operators::MeanOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(mean_grad, paddle::lite::operators::MeanGradOp);
+#endif
diff --git a/paddle/fluid/lite/operators/mul_op.cc b/paddle/fluid/lite/operators/mul_op.cc
index 75b536a093d11c264832e8875b82e7fe07ad2ba9..8958d91956a7604c7ac29701cfa6367ed16dedcf 100644
--- a/paddle/fluid/lite/operators/mul_op.cc
+++ b/paddle/fluid/lite/operators/mul_op.cc
@@ -31,15 +31,16 @@ bool MulOpLite::CheckShape() const {
   CHECK_GT_OR_FALSE(x_dims.size(), static_cast<size_t>(param_.x_num_col_dims));
   CHECK_GT_OR_FALSE(y_dims.size(), static_cast<size_t>(param_.y_num_col_dims));

-  auto x_mat_dims =
-      framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
-  auto y_mat_dims =
-      framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
-
-  PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
-                    "First matrix's width must be equal with second matrix's "
-                    "height. %s, %s",
-                    x_mat_dims[1], y_mat_dims[0]);
+  // auto x_mat_dims =
+  //     framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
+  // auto y_mat_dims =
+  //     framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
+
+  // PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
+  //                   "First matrix's width must be equal with second matrix's
+  //                   "
+  //                   "height. %s, %s",
+  //                   x_mat_dims[1], y_mat_dims[0]);

   return true;
 }
@@ -66,6 +67,8 @@ bool MulOpLite::InferShape() const {
   return true;
 }

+#ifdef LITE_WITH_X86
+
 bool MulGradOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.x);
   CHECK_OR_FALSE(param_.y);
@@ -97,6 +100,7 @@ bool MulGradOpLite::AttachImpl(const OpDesc &op_desc, lite::Scope *scope) {
   return true;
 }
+#endif

 }  // namespace operators
 }  // namespace lite