Commit 4ef9a97f authored by Ruilong Liu, committed by GitHub

Merge branch 'develop' into develop

cmake_minimum_required(VERSION 3.0)
cmake_minimum_required(VERSION 3.6)
project(paddle-mobile)
option(DEBUGING "enable debug mode" ON)
......
@@ -84,7 +84,7 @@ struct Variant {
if (type_id == typeid(T).hash_code()) {
return *const_cast<T *>(reinterpret_cast<const T *>(&data));
} else {
PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant ");
PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant");
exit(0);
}
}
......
@@ -42,8 +42,17 @@ class FusionOpRegister {
matchers_[matcher->Type()] = shared_matcher;
}
const std::map<std::string, std::shared_ptr<FusionOpMatcher>> Matchers() {
return matchers_;
const std::vector<std::shared_ptr<FusionOpMatcher>> Matchers() {
std::vector<std::shared_ptr<FusionOpMatcher>> matchers;
for (const auto& match : matchers_) {
matchers.push_back(match.second);
}
std::sort(matchers.begin(), matchers.end(),
[](std::shared_ptr<FusionOpMatcher> first,
std::shared_ptr<FusionOpMatcher> second) {
return first->BeginNode().Depth() > second->BeginNode().Depth();
});
return matchers;
}
private:
......
@@ -44,23 +44,6 @@ bool Node::operator==(const Node &in) {
return true;
}
std::vector<std::shared_ptr<framework::OpDesc>> Node::OpDescs(int size) {
std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
OpDescs(size - 1, &op_descs);
return op_descs;
}
void Node::OpDescs(int index,
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc) {
if (index == 0) {
return;
}
op_desc->push_back(this->op_desc_);
for (auto &output : outputs_) {
output->OpDescs(index, op_desc);
}
}
std::shared_ptr<Node> Node::To(int size) {
std::shared_ptr<Node> node = std::make_shared<Node>();
this->To(size - 1, node);
......
@@ -47,13 +47,10 @@ class Node {
std::map<std::string, std::vector<std::pair<std::string, std::string>>>
change,
std::vector<std::shared_ptr<Node>> *removed_nodes);
std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(int size);
std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
std::string Type() { return type_; }
private:
void OpDescs(int size,
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc);
void To(int index, std::shared_ptr<Node>);
void Folder(
std::shared_ptr<framework::OpDesc> op_desc,
......
@@ -78,9 +78,8 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FusionOptimize(
}
for (auto &registed : FusionOpRegister::Instance()->Matchers()) {
std::string fusion_type = registed.first;
std::shared_ptr<FusionOpMatcher> matcher = registed.second;
// DLOG << " registed node \n " << matcher->BeginNode();
std::string fusion_type = registed->Type();
std::shared_ptr<FusionOpMatcher> matcher = registed;
auto match_vector = type_map[matcher->BeginType()];
......
@@ -26,7 +26,7 @@ void BatchNormOp<Dtype, T>::InferShape() const {
auto x_dims = this->param_.InputX()->dims();
this->param_.OutputY()->Resize(x_dims);
}
template class BatchNormOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -47,7 +47,7 @@ void BoxCoderOp<Dtype, T>::InferShape() const {
this->param_.OutputBox()->Resize(framework::make_ddim(
{input_targetbox_dims[0], input_priorbox_dims[0], 4}));
}
template class BoxCoderOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -56,7 +56,6 @@ void ConcatOp<Dtype, T>::InferShape() const {
this->param_.Out()->Resize(out_dims);
}
template class ConcatOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -48,8 +48,6 @@ void ConvOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
template class ConvOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -49,8 +49,6 @@ void DepthwiseConvOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
template class DepthwiseConvOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ void DropoutOp<Dtype, T>::InferShape() const {
auto input_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(input_dims);
}
template class DropoutOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ElementwiseAddOp<Dtype, T>::InferShape() const {
auto x_dim = this->param_.InputX()->dims();
this->param_.Out()->Resize(x_dim);
}
template class ElementwiseAddOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -14,10 +14,7 @@ limitations under the License. */
#include "feed_op.h"
namespace paddle_mobile {
namespace operators {
template class FeedOp<CPU, float>;
}
namespace operators {}
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
......
@@ -14,10 +14,7 @@ limitations under the License. */
#include "fetch_op.h"
namespace paddle_mobile {
namespace operators {
template class FetchOp<CPU, float>;
}
namespace operators {}
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
......
@@ -45,7 +45,6 @@ void FusionConvAddOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
template class FusionConvAddOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -36,8 +36,6 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher {
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}}}, removed_nodes);
}
......
@@ -44,7 +44,7 @@ void FusionConvAddBNReluOp<Dtype, T>::InferShape() const {
framework::DDim ddim = framework::make_ddim(output_shape);
this->param_.Output()->Resize(ddim);
}
template class FusionConvAddBNReluOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -39,8 +39,6 @@ class FusionConvAddBNReluMatcher : public framework::FusionOpMatcher {
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
{G_OP_TYPE_BATCHNORM,
......
@@ -44,7 +44,7 @@ void FusionDWConvBNReluOp<Dtype, T>::InferShape() const {
framework::DDim ddim = framework::make_ddim(output_shape);
this->param_.Output()->Resize(ddim);
}
template class FusionDWConvBNReluOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -38,8 +38,6 @@ class FusionDWConvBNReluMatcher : public framework::FusionOpMatcher {
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_BATCHNORM,
{{"Scale", "Scale"},
......
@@ -50,7 +50,6 @@ void FusionFcOp<Dtype, T>::InferShape() const {
this->param_.Out()->Resize(ddim);
}
template class FusionFcOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -47,8 +47,6 @@ void Im2SequenceOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
template class Im2SequenceOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void LrnOp<Dtype, T>::InferShape() const {
auto x_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(x_dims);
}
template class LrnOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -50,7 +50,7 @@ void MulOp<Dtype, T>::InferShape() const {
framework::DDim ddim = framework::make_ddim(output_dims);
this->param_.Out()->Resize(ddim);
}
template class MulOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -34,7 +34,7 @@ void MultiClassNMSOp<Dtype, T>::InferShape() const {
// pre size, will change in Compute.
this->param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6}));
}
template class MultiClassNMSOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -371,7 +371,7 @@ class BatchNormParam : OpParam {
input_variance_ = InputVarianceFrom<LoDTensor>(inputs, scope);
epsilon_ = GetAttr<float>("epsilon", attrs);
momentum_ = GetAttr<float>("momentum", attrs);
is_test_ = GetAttr<bool>("is_test", attrs);
// is_test_ = GetAttr<bool>("is_test", attrs);
}
const Tensor *InputX() const { return input_x_; }
......
@@ -54,7 +54,7 @@ void PoolOp<DeviceType, T>::InferShape() const {
}
this->param_.Output()->Resize(framework::make_ddim(output_shape));
}
template class PoolOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ void PReluOp<Dtype, T>::InferShape() const {
auto input_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(input_dims);
}
template class PReluOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -44,7 +44,7 @@ void PriorBoxOp<Dtype, T>::InferShape() const {
this->param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec));
this->param_.OutputVariances()->Resize(framework::make_ddim(dim_vec));
}
template class PriorBoxOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ void ReluOp<Dtype, T>::InferShape() const {
auto input_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(input_dims);
}
template class ReluOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -27,7 +27,7 @@ void ReshapeOp<Dtype, T>::InferShape() const {
auto out_dims = ValidateShape(shape, input_x_dims);
this->param_.Out()->Resize(out_dims);
}
template class ReshapeOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ResizeOp<Dtype, T>::InferShape() const {
auto out_dims = CalOutputShape(this->param_);
this->param_.Out()->Resize(out_dims);
}
template class ResizeOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ScaleOp<Dtype, T>::InferShape() const {
auto input_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(input_dims);
}
template class ScaleOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ template <typename DeviceType, typename T>
void SigmoidOp<DeviceType, T>::InferShape() const {
this->param_.Out()->Resize(this->param_.InputX()->dims());
}
template class SigmoidOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ template <typename Dtype, typename T>
void SliceOp<Dtype, T>::InferShape() const {
/// todo: add InputShape() detection.
}
template class SliceOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ template <typename DeviceType, typename T>
void SoftmaxOp<DeviceType, T>::InferShape() const {
this->param_.Out()->Resize(this->param_.InputX()->dims());
}
template class SoftmaxOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......
@@ -47,7 +47,7 @@ void TransposeOp<Dtype, T>::InferShape() const {
}
this->param_.Out()->Resize(out_dims);
}
template class TransposeOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
......