Unverified commit 69e98260, authored by liuwei1031, committed by GitHub

fix compilation issue (#17725)

Parent e2c1630a
@@ -12,7 +12,7 @@ cc_library(activation_ops_lite SRCS activation_ops.cc DEPS ${op_DEPS})
 cc_library(elementwise_ops_lite SRCS elementwise_ops.cc DEPS ${op_DEPS})
 cc_library(mean_op_lite SRCS mean_op.cc DEPS ${op_DEPS})
 cc_library(fill_constant_op_lite SRCS fill_constant_op.cc DEPS ${op_DEPS})
-cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
+#cc_library(sgd_op_lite SRCS sgd_op.cc DEPS ${op_DEPS})
 cc_library(op_params_lite SRCS op_params.cc DEPS ${tensor_lite} any_lite framework_proto_lite)
 set(ops_lite
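Review note: the sgd_op_lite target is commented out rather than removed, so sgd_op.cc simply drops out of the build; presumably it still depends on fluid framework pieces that are unavailable in some lite configurations, and the target can be restored once the op is ported.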
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -48,6 +50,7 @@ class ActivationOp : public OpLite {
   mutable ActivationParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class ActivationGradOp : public OpLite {
  public:
   explicit ActivationGradOp(const std::string& type) : OpLite(type) {}
@@ -79,10 +82,13 @@ class ActivationGradOp : public OpLite {
  private:
   mutable ActivationGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(square, paddle::lite::operators::ActivationOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(square_grad, paddle::lite::operators::ActivationGradOp);
+#endif
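Review note: this file establishes the pattern repeated in the files below. The grad op needs paddle/fluid/framework/operator.h, which is only present in the x86 (full fluid) build, so the include, the ActivationGradOp class, and its REGISTER_LITE_OP call are all fenced with LITE_WITH_X86. A minimal self-contained sketch of the same idea, using toy stand-in types rather than the real op classes:

```cpp
#include <iostream>

struct OpLite {  // toy stand-in for paddle::lite::OpLite
  virtual ~OpLite() = default;
};

struct SquareOp : OpLite {};  // forward op: built for every target

#ifdef LITE_WITH_X86
// The grad op depends on the full fluid framework, so it only exists
// when the build defines LITE_WITH_X86 (e.g. -DLITE_WITH_X86).
struct SquareGradOp : OpLite {};
#endif

int main() {
#ifdef LITE_WITH_X86
  std::cout << "square_grad is available\n";
#else
  std::cout << "square_grad was compiled out\n";
#endif
  return 0;
}
```

Guarding the registration macro along with the class matters: registering a type that was compiled out would produce exactly the kind of build break this commit fixes.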
@@ -37,7 +37,6 @@ class ElementwiseOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Y_name = opdesc.Input("Y").front();
     auto Out_name = opdesc.Output("Out").front();
@@ -45,7 +44,7 @@ class ElementwiseOp : public OpLite {
 
     param_.X = GetVar<lite::Tensor>(scope, X_name);
     param_.Y = GetVar<lite::Tensor>(scope, Y_name);
     param_.Out = GetMutableVar<Tensor>(scope, Out_name);
-    param_.axis = boost::get<int>(opdesc.GetAttr("axis"));
+    param_.axis = GetAttr<int>(opdesc.GetAttr("axis"));
     return true;
   }
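Review note: boost::get<int> on the attribute value is swapped for GetAttr<int> here (and for the four attributes in fill_constant_op.cc below). I haven't checked the helper's real definition in the lite headers, so the following is only a sketch of the idea, a typed accessor over a variant, with GetAttr written from scratch as an assumption:

```cpp
#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

// Hypothetical attribute variant covering the types read in this diff:
// axis (int), value (float), force_cpu (bool), shape (vector<int64_t>).
using Attribute = std::variant<int, float, bool, std::vector<int64_t>>;

// Hypothetical GetAttr<T>: unwrap the variant into the requested type,
// mirroring what boost::get<T> did before this change.
template <typename T>
T GetAttr(const Attribute& attr) {
  return std::get<T>(attr);
}

int main() {
  Attribute axis = 1;
  Attribute value = 2.5f;
  std::cout << GetAttr<int>(axis) << " " << GetAttr<float>(value) << "\n";
  return 0;
}
```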
@@ -58,6 +57,7 @@ class ElementwiseOp : public OpLite {
   mutable operators::ElementwiseParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class ElementwiseGradExplicitOp : public OpLite {
  public:
   explicit ElementwiseGradExplicitOp(const std::string& type) : OpLite(type) {}
@@ -99,11 +99,14 @@ class ElementwiseGradExplicitOp : public OpLite {
  private:
   mutable operators::ElementwiseGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(elementwise_sub, paddle::lite::operators::ElementwiseOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(elementwise_sub_grad,
                  paddle::lite::operators::ElementwiseGradExplicitOp);
+#endif
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -35,14 +34,13 @@ class FillConstantOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto Out_name = opdesc.Output("Out").front();
 
     param_.Out = GetMutableVar<Tensor>(scope, Out_name);
-    param_.dtype = boost::get<int>(opdesc.GetAttr("dtype"));
-    param_.shape = boost::get<std::vector<int64_t>>(opdesc.GetAttr("shape"));
-    param_.value = boost::get<float>(opdesc.GetAttr("value"));
-    param_.force_cpu = boost::get<bool>(opdesc.GetAttr("force_cpu"));
+    param_.dtype = GetAttr<int>(opdesc.GetAttr("dtype"));
+    param_.shape = GetAttr<std::vector<int64_t>>(opdesc.GetAttr("shape"));
+    param_.value = GetAttr<float>(opdesc.GetAttr("value"));
+    param_.force_cpu = GetAttr<bool>(opdesc.GetAttr("force_cpu"));
     return true;
   }
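Review note: dropping CHECK_EQ(opdesc.Inputs().size(), 2UL) looks right for this op in particular: fill_constant takes no tensor inputs, and its output is determined entirely by the dtype/shape/value/force_cpu attributes, so a two-input assertion could never hold. A toy illustration (hypothetical helper, not the lite kernel) of that attribute-driven behaviour:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Toy fill_constant: the output tensor is built purely from attributes;
// there are no tensor inputs whose count a CHECK_EQ could validate.
std::vector<float> FillConstant(const std::vector<int64_t>& shape,
                                float value) {
  int64_t numel = 1;
  for (int64_t d : shape) numel *= d;
  return std::vector<float>(static_cast<size_t>(numel), value);
}

int main() {
  auto out = FillConstant({2, 3}, 1.5f);  // 6 elements, all 1.5
  std::cout << out.size() << " elements, out[0] = " << out[0] << "\n";
  return 0;
}
```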
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#ifdef LITE_WITH_X86
 #include "paddle/fluid/framework/operator.h"
+#endif
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/op_registry.h"
@@ -36,7 +38,6 @@ class MeanOp : public OpLite {
   }
 
   bool AttachImpl(const OpDesc& opdesc, lite::Scope* scope) override {
-    CHECK_EQ(opdesc.Inputs().size(), 2UL);
     auto X_name = opdesc.Input("X").front();
     auto Out_name = opdesc.Output("Out").front();
@@ -53,6 +54,7 @@ class MeanOp : public OpLite {
   mutable operators::ElementwiseParam param_;
 };
 
+#ifdef LITE_WITH_X86
 class MeanGradOp : public OpLite {
  public:
   explicit MeanGradOp(const std::string& type) : OpLite(type) {}
@@ -89,10 +91,13 @@ class MeanGradOp : public OpLite {
  private:
   mutable operators::MeanGradParam param_;
 };
+#endif
 
 }  // namespace operators
 }  // namespace lite
 }  // namespace paddle
 
 REGISTER_LITE_OP(mean, paddle::lite::operators::MeanOp);
+#ifdef LITE_WITH_X86
 REGISTER_LITE_OP(mean_grad, paddle::lite::operators::MeanGradOp);
+#endif
@@ -31,15 +31,16 @@ bool MulOpLite::CheckShape() const {
   CHECK_GT_OR_FALSE(x_dims.size(), static_cast<size_t>(param_.x_num_col_dims));
   CHECK_GT_OR_FALSE(y_dims.size(), static_cast<size_t>(param_.y_num_col_dims));
 
-  auto x_mat_dims =
-      framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
-  auto y_mat_dims =
-      framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
-
-  PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
-                    "First matrix's width must be equal with second matrix's "
-                    "height. %s, %s",
-                    x_mat_dims[1], y_mat_dims[0]);
+  // auto x_mat_dims =
+  //     framework::flatten_to_2d(x_dims.data(), param_.x_num_col_dims);
+  // auto y_mat_dims =
+  //     framework::flatten_to_2d(y_dims.data(), param_.y_num_col_dims);
+
+  // PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
+  //                   "First matrix's width must be equal with second matrix's
+  //                   "
+  //                   "height. %s, %s",
+  //                   x_mat_dims[1], y_mat_dims[0]);
   return true;
 }
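Review note: the disabled block checked that the two operands are multiplicable after being flattened to matrices; if it is only commented out to unblock the build, restoring the check later would be worthwhile. For reference, framework::flatten_to_2d collapses an N-D shape into a 2-D one by splitting at num_col_dims; below is a standalone sketch of that computation and of the invariant the old PADDLE_ENFORCE_EQ expressed (my own reimplementation, not the framework code):

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Sketch of what framework::flatten_to_2d computes: dims[0, k) collapse
// into the row count, dims[k, n) into the column count.
std::vector<int64_t> FlattenTo2D(const std::vector<int64_t>& dims, int k) {
  auto prod = [&](int lo, int hi) {
    return std::accumulate(dims.begin() + lo, dims.begin() + hi, int64_t{1},
                           std::multiplies<int64_t>());
  };
  return {prod(0, k), prod(k, static_cast<int>(dims.size()))};
}

int main() {
  auto x = FlattenTo2D({2, 3, 4}, 1);  // x as a matrix: [2, 12]
  auto y = FlattenTo2D({3, 4, 5}, 2);  // y as a matrix: [12, 5]
  // The invariant the commented-out PADDLE_ENFORCE_EQ verified:
  std::cout << (x[1] == y[0] ? "compatible" : "mismatch") << "\n";
  return 0;
}
```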
@@ -66,6 +67,8 @@ bool MulOpLite::InferShape() const {
   return true;
 }
+
+#ifdef LITE_WITH_X86
 bool MulGradOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.x);
   CHECK_OR_FALSE(param_.y);
@@ -97,6 +100,7 @@ bool MulGradOpLite::AttachImpl(const OpDesc &op_desc, lite::Scope *scope) {
   return true;
 }
+#endif
 
 }  // namespace operators
 }  // namespace lite