Commit f68f1480 authored by liuruilong

format files

Parent 1f6f8d57
@@ -117,6 +117,5 @@ static std::unordered_map<
     {G_OP_TYPE_PRIOR_BOX, {{"Image", "Input"}, {"Boxes", "Variances"}}},
     {G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}},
     {G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}},
-    {G_OP_TYPE_DEPTHWISE_CONV, {{"Input"}, {"Output"}}}
-};
+    {G_OP_TYPE_DEPTHWISE_CONV, {{"Input"}, {"Output"}}}};
 } // namespace paddle_mobile
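Note: the map in the hunk above keys each op type to its canonical input and output variable names; FushionOptimize (further down in this commit) returns nullptr when it meets an op type missing from the map. A minimal, self-contained C++ sketch of that lookup pattern, using illustrative string keys in place of the real G_OP_TYPE_* constants:

// Standalone sketch of the op_input_output_key lookup used by
// FushionOptimize. The string keys here are hypothetical stand-ins
// for the G_OP_TYPE_* constants defined in the repo.
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using OpKeys = std::pair<std::vector<std::string>, std::vector<std::string>>;

static std::unordered_map<std::string, OpKeys> op_input_output_key = {
    {"reshape", {{"X"}, {"Out"}}},
    {"depthwise_conv2d", {{"Input"}, {"Output"}}},
};

int main() {
  auto it = op_input_output_key.find("reshape");
  if (it == op_input_output_key.end()) {
    // Mirrors the optimizer: an unsupported op type aborts the pass.
    std::cout << "unsupported op, returning null\n";
    return 1;
  }
  std::cout << "first input: " << it->second.first[0]
            << ", first output: " << it->second.second[0] << "\n";
  return 0;
}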
@@ -145,7 +145,9 @@ class FusionOpMatcher : PaddleMobileObject {
   virtual std::string Type() = 0;

-  virtual void FolderNodes(Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
+  virtual void FolderNodes(
+      Node *node,
+      std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     node->Folder(node_.Depth(), Type(), {}, removed_nodes);
   }
......
@@ -254,8 +254,7 @@ void Node::Folder(
     std::shared_ptr<framework::OpDesc> op_desc,
     std::vector<std::shared_ptr<Node>> *outputs, uint index,
     std::map<std::string, std::pair<std::string, std::string>> *change,
-    Node *begin_node,
-    std::vector<std::shared_ptr<Node>> *removed_nodes) {
+    Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes) {
   if (change->find(this->type_) != change->end()) {
     auto change_pair = (*change)[this->type_];
     op_desc->GetInputs()[change_pair.second] =
@@ -269,7 +268,8 @@ void Node::Folder(
     --index;
     for (auto output : outputs_) {
       removed_nodes->push_back(output);
-      output->Folder(op_desc, outputs, index, change, begin_node, removed_nodes);
+      output->Folder(op_desc, outputs, index, change, begin_node,
+                     removed_nodes);
     }
   } else {
     for (auto &op_output : this->op_desc_->outputs_) {
@@ -282,7 +282,6 @@ void Node::Folder(
         if (iter != output->inputs_.end()) {
           output->inputs_.erase(iter);
         }
-        output->inputs_.push_back(begin_node);
         outputs->push_back(output);
......
@@ -43,7 +43,8 @@ class Node : PaddleMobileObject {
   uint Depth(uint begin = 0);
   Node &Folder(
       uint size, std::string type,
-      std::map<std::string, std::pair<std::string, std::string>> change_map, std::vector<std::shared_ptr<Node>> *removed_nodes);
+      std::map<std::string, std::pair<std::string, std::string>> change_map,
+      std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(uint size);
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs();
   std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
@@ -63,8 +64,7 @@ class Node : PaddleMobileObject {
       std::shared_ptr<framework::OpDesc> op_desc,
       std::vector<std::shared_ptr<Node>> *outputs, uint index,
       std::map<std::string, std::pair<std::string, std::string>> *change,
-      Node *begin_node,
-      std::vector<std::shared_ptr<Node>> *removed_nodes);
+      Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::shared_ptr<framework::OpDesc> op_desc_;
   std::string ToString(std::string blank, const Node *node) const;
   std::vector<std::shared_ptr<Node>> outputs_;
......
@@ -31,7 +31,6 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
   std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>>
       type_map;
   std::vector<std::shared_ptr<Node>> nodes;
-  std::shared_ptr<Node> begin_node;
@@ -41,7 +40,8 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
       auto op = block->Ops()[j];
       auto op_type = op->Type();
       if (op_input_output_key.find(op->Type()) == op_input_output_key.end()) {
-        LOG(kLOG_ERROR) << "has not support op return null " << " op type: " << op->Type();
+        LOG(kLOG_ERROR) << "has not support op return null "
+                        << " op type: " << op->Type();
         return nullptr;
       }
@@ -97,14 +97,15 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
         for (int j = 0; j < removed_nodes.size(); ++j) {
           auto removed_node = removed_nodes[j];
-          auto removed_ite = std::find(nodes.begin(), nodes.end(), removed_node);
+          auto removed_ite =
+              std::find(nodes.begin(), nodes.end(), removed_node);
           nodes.erase(removed_ite);
         }
       }
     }
   }
-  // DLOG << "node: \n" << *begin_node;
+  // DLOG << "node: \n" << *begin_node;
   std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
   // bool can_splite = begin_node->CanSplit({G_OP_TYPE_CONV,
@@ -113,7 +114,7 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
       auto &node = nodes[m];
       op_descs.push_back(node->op_desc_);
     }
-    // GenerateOps(&op_descs, begin_node.get());
+    // GenerateOps(&op_descs, begin_node.get());
     block->ops_ = op_descs;
   }
@@ -128,7 +129,6 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
 void ProgramOptimize::GenerateOps(
     std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
     Node *current_node) {
-
   if (current_node->inputs_.size() > 1 &&
       input_node != current_node->inputs_.back()) {
     DLOG << " current type " << current_node->type_;
......
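Note: the reformatting above does not change behavior, but the erase loop deserves a second look: std::find returns nodes.end() when removed_node is not present in nodes, and calling erase on end() is undefined behavior. A guarded variant of the same pattern (a sketch under that assumption, not part of this commit):

#include <algorithm>
#include <memory>
#include <vector>

// Erase every entry of `removed` from `nodes`, skipping entries that are
// absent; checking the iterator avoids erasing end(), which is UB.
template <typename T>
void EraseRemoved(std::vector<std::shared_ptr<T>> *nodes,
                  const std::vector<std::shared_ptr<T>> &removed) {
  for (const auto &removed_node : removed) {
    auto removed_ite = std::find(nodes->begin(), nodes->end(), removed_node);
    if (removed_ite != nodes->end()) {
      nodes->erase(removed_ite);
    }
  }
}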
@@ -17,9 +17,7 @@ namespace paddle_mobile {
 namespace operators {

 template <typename Dtype, typename T>
-void FushionConvAddOp<Dtype, T>::InferShape() const {
-}
+void FushionConvAddOp<Dtype, T>::InferShape() const {}

 template class FushionConvAddOp<CPU, float>;

 } // namespace operators
 } // namespace paddle_mobile
......
@@ -31,10 +31,13 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher {
     node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD);
   }

-  void FolderNodes(framework::Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
+  void FolderNodes(
+      framework::Node *node,
+      std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     vector<std::shared_ptr<framework::OpDesc>> origin_descs =
-      node->OpDescs(node_.Depth());
-    node->Folder(node_.Depth(), Type(), {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Y"}}}, removed_nodes);
+        node->OpDescs(node_.Depth());
+    node->Folder(node_.Depth(), Type(),
+                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Y"}}}, removed_nodes);
   }

   std::string Type() { return G_OP_TYPE_CONV_ADD; }
@@ -44,14 +47,13 @@ template <typename DeviceType, typename T>
 class FushionConvAddOp : public framework::OperatorWithKernel<DeviceType> {
  public:
   FushionConvAddOp(const string &type, const VariableNameMap &inputs,
-                   const VariableNameMap &outputs,
-                   const framework::AttributeMap attrs,
-                   std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
-                                                  scope) {}
+                   const VariableNameMap &outputs,
+                   const framework::AttributeMap attrs,
+                   std::shared_ptr<framework::Scope> scope)
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope) {}

-  void RunImpl() const {
-  }
+  void RunImpl() const {}

   using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
   void InferShape() const override;
@@ -60,7 +62,7 @@ class FushionConvAddOp : public framework::OperatorWithKernel<DeviceType> {
   // FushionFcParam param_;
 };

-//static framework::FusionOpRegistrar fc_registrar(new FusionConvAddMatcher());
+// static framework::FusionOpRegistrar fc_registrar(new FusionConvAddMatcher());
 } // namespace operators
 } // namespace paddle_mobile
@@ -28,7 +28,9 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
     std::make_shared<framework::Node>(G_OP_TYPE_RELU);
   }

-  void FolderNodes(framework::Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
+  void FolderNodes(
+      framework::Node *node,
+      std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     std::vector<std::shared_ptr<framework::OpDesc>> origin_descs =
         node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
......
@@ -32,7 +32,9 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
     node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD);
   }

-  void FolderNodes(framework::Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
+  void FolderNodes(
+      framework::Node *node,
+      std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     vector<std::shared_ptr<framework::OpDesc>> origin_descs =
         node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
@@ -65,7 +67,7 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
   FushionFcParam param_;
 };

-//static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
+// static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 } // namespace operators
 } // namespace paddle_mobile
@@ -17,10 +17,9 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {

-template<>
-void ConvKernel<GPU_MALI, float>::Compute(const ConvParam &param) const
-{}
+template <>
+void ConvKernel<GPU_MALI, float>::Compute(const ConvParam &param) const {}

-template class ConvKernel<GPU_MALI, float>;
-}
+template class ConvKernel<GPU_MALI, float>;
+} // namespace operators
 } // namespace paddle_mobile
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "io.h"
 #include "../test_helper.h"
 #include "framework/program/program-optimize/node.h"
 #include "framework/program/program-optimize/program_optimize.h"
+#include "io.h"

 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
@@ -25,7 +25,7 @@ int main() {
   // program.originProgram->Description("origin");
   auto optimize_program = optimize.FushionOptimize(program.originProgram);
   if (optimize_program != nullptr) {
-    // optimize_program->Description("optimize");
+    // optimize_program->Description("optimize");
   } else {
     LOG(paddle_mobile::kLOG_ERROR) << "optimize_program is null";
   }
......