Commit c161f967 authored by liuwei1031, committed by GitHub

fix compilation issue (#17725)

Parent adba6b83
CMakeLists.txt
@@ -12,7 +12,7 @@ cc_library(activation_ops_lite SRCS activation_ops.cc DEPS ${op_DEPS})
 cc_library(elementwise_ops_lite SRCS elementwise_ops.cc DEPS ${op_DEPS})
 cc_library(mean_op_lite SRCS mean_op.cc DEPS ${op_DEPS})
 cc_library(fill_constant_op_lite SRCS fill_constant_op.cc DEPS ${op_DEPS})
-cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
+#cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
 cc_library(op_params_lite SRCS op_params.cc DEPS ${tensor_lite} any_lite framework_proto_lite)
 set(ops_lite
...
activation_ops.cc
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -48,6 +50,7 @@ class ActivationOp : public OpLite {
   mutable ActivationParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class ActivationGradOp : public OpLite {
  public:
   explicit ActivationGradOp(const std::string& type) : OpLite(type) {}
@@ -79,10 +82,13 @@ class ActivationGradOp : public OpLite {
  private:
   mutable ActivationGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(square, paddle::lite::operators::ActivationOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(square_grad, paddle::lite::operators::ActivationGradOp);
+#endif
elementwise_ops.cc
@@ -37,7 +37,6 @@ class ElementwiseOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Y_name = opdesc.Input("Y").front();
     auto Out_name = opdesc.Output("Out").front();
@@ -45,7 +44,7 @@ class ElementwiseOp : public OpLite {
     param_.X = GetVar<lite::Tensor>(scope, X_name);
     param_.Y = GetVar<lite::Tensor>(scope, Y_name);
     param_.Out = GetMutableVar<Tensor>(scope, Out_name);
-    param_.axis = boost::get<int>(opdesc.GetAttr("axis"));
+    param_.axis = GetAttr<int>(opdesc.GetAttr("axis"));
     return true;
   }
@@ -58,6 +57,7 @@ class ElementwiseOp : public OpLite {
   mutable operators::ElementwiseParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class ElementwiseGradExplicitOp : public OpLite {
  public:
   explicit ElementwiseGradExplicitOp(const std::string& type) : OpLite(type) {}
@@ -99,11 +99,14 @@ class ElementwiseGradExplicitOp : public OpLite {
  private:
   mutable operators::ElementwiseGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(elementwise_sub, paddle::lite::operators::ElementwiseOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(elementwise_sub_grad,
                  paddle::lite::operators::ElementwiseGradExplicitOp);
+#endif
fill_constant_op.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -35,14 +34,13 @@ class FillConstantOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto Out_name = opdesc.Output("Out").front();
 
     param_.Out = GetMutableVar<Tensor>(scope, Out_name);
-    param_.dtype = boost::get<int>(opdesc.GetAttr("dtype"));
-    param_.shape = boost::get<std::vector<int64_t>>(opdesc.GetAttr("shape"));
-    param_.value = boost::get<float>(opdesc.GetAttr("value"));
-    param_.force_cpu = boost::get<bool>(opdesc.GetAttr("force_cpu"));
+    param_.dtype = GetAttr<int>(opdesc.GetAttr("dtype"));
+    param_.shape = GetAttr<std::vector<int64_t>>(opdesc.GetAttr("shape"));
+    param_.value = GetAttr<float>(opdesc.GetAttr("value"));
+    param_.force_cpu = GetAttr<bool>(opdesc.GetAttr("force_cpu"));
     return true;
   }
...
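Across these hunks the attribute reads switch from boost::get<T>() to a GetAttr<T>() helper, dropping the direct boost dependency from the op definitions. Below is a minimal, hypothetical sketch of such a wrapper over std::variant; it is not the actual paddle/fluid/lite helper (whose variant type and error handling may differ), only an illustration of the shape of the API:

// getattr_sketch.cc -- hypothetical GetAttr<T>() over a variant attribute.
// Build: g++ -std=c++17 getattr_sketch.cc
#include <cstdint>
#include <variant>
#include <vector>

// Assumed attribute variant; the real framework supports more types.
using Attribute = std::variant<int, float, bool, std::vector<int64_t>>;

template <typename T>
T GetAttr(const Attribute& attr) {
  // One choke point for variant access: a type mismatch throws
  // std::bad_variant_access here rather than at scattered call sites.
  return std::get<T>(attr);
}

int main() {
  Attribute dtype = 5;     // e.g. a framework.proto dtype enum value
  Attribute value = 1.0f;  // e.g. the fill value of fill_constant
  return (GetAttr<int>(dtype) == 5 && GetAttr<float>(value) == 1.0f) ? 0 : 1;
}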
mean_op.cc
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -36,7 +38,6 @@ class MeanOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Out_name = opdesc.Output("Out").front();
@@ -53,6 +54,7 @@ class MeanOp : public OpLite {
   mutable operators::ElementwiseParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class MeanGradOp : public OpLite {
  public:
   explicit MeanGradOp(const std::string& type) : OpLite(type) {}
@@ -89,10 +91,13 @@ class MeanGradOp : public OpLite {
  private:
   mutable operators::MeanGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(mean, paddle::lite::operators::MeanOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(mean_grad, paddle::lite::operators::MeanGradOp);
+#endif
mul_op.cc
@@ -31,15 +31,16 @@ bool MulOpLite::CheckShape() const {
   CHECK_GT_OR_FALSE(x_dims.size(), static_cast<size_t>(param_.x_num_col_dims));
   CHECK_GT_OR_FALSE(y_dims.size(), static_cast<size_t>(param_.y_num_col_dims));
 
-  auto x_mat_dims =
-      framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
-  auto y_mat_dims =
-      framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
-  PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
-                    "First matrix's width must be equal with second matrix's "
-                    "height. %s, %s",
-                    x_mat_dims[1], y_mat_dims[0]);
+  // auto x_mat_dims =
+  //     framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
+  // auto y_mat_dims =
+  //     framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
+  // PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
+  //                   "First matrix's width must be equal with second matrix's "
+  //                   "height. %s, %s",
+  //                   x_mat_dims[1], y_mat_dims[0]);
 
   return true;
 }
@@ -66,6 +67,8 @@ bool MulOpLite::InferShape() const {
   return true;
 }
 
+#ifdef LITE_WITH_X86
 bool MulGradOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.x);
   CHECK_OR_FALSE(param_.y);
@@ -97,6 +100,7 @@ bool MulGradOpLite::AttachImpl(const OpDesc &op_desc, lite::Scope *scope) {
   return true;
 }
+#endif
 
 }  // namespace operators
 }  // namespace lite
...
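The recurring fix in every file above is the same: code that needs paddle/fluid/framework/operator.h (the grad ops and their registrations) is compiled only when LITE_WITH_X86 is defined, so builds for other targets never see those symbols. A self-contained sketch of that guard pattern, using stand-in types rather than the real Paddle classes:

// guard_sketch.cc -- the LITE_WITH_X86 conditional-compilation pattern.
// Mobile-style build: g++ -std=c++11 guard_sketch.cc
// x86 build:          g++ -std=c++11 -DLITE_WITH_X86 guard_sketch.cc
#include <iostream>
#include <string>

struct OpLite {  // stand-in for paddle::lite::OpLite
  explicit OpLite(const std::string& type) : type_(type) {}
  std::string type_;
};

struct MeanOp : OpLite {  // forward op: built on every target
  using OpLite::OpLite;
};

#ifdef LITE_WITH_X86
struct MeanGradOp : OpLite {  // grad op: x86-only, as in the diff above
  using OpLite::OpLite;
};
#endif

int main() {
  std::cout << "op: " << MeanOp("mean").type_ << "\n";
#ifdef LITE_WITH_X86
  // Like REGISTER_LITE_OP(mean_grad, ...) in the commit, this block
  // simply does not exist in builds without LITE_WITH_X86.
  std::cout << "op: " << MeanGradOp("mean_grad").type_ << "\n";
#endif
  return 0;
}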