Commit d71b8325 authored by liuruilong

format files

Parent 3a407aef
......@@ -18,18 +18,15 @@ namespace paddle_mobile {
enum class Precision : int { FP32 = 0 };
template <Precision p>
struct PrecisionTrait{
struct PrecisionTrait {
typedef void ptype;
};
template <>
struct PrecisionTrait<Precision::FP32>{
struct PrecisionTrait<Precision::FP32> {
typedef float ptype;
};
//! device type
enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
......
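For context on the PrecisionTrait hunk above: the trait maps a Precision enum value to a concrete element type at compile time (void in the primary template marks unsupported precisions), which is presumably how Executor's Ptype alias is derived. A minimal self-contained sketch of how such a trait is consumed; AllocBuffer and main are illustrative, not part of the diff:

```cpp
#include <cstddef>
#include <type_traits>

enum class Precision : int { FP32 = 0 };

template <Precision p>
struct PrecisionTrait {
  typedef void ptype;  // primary template: precision has no concrete type
};

template <>
struct PrecisionTrait<Precision::FP32> {
  typedef float ptype;  // FP32 maps to float
};

// A templated component recovers the element type from the enum value.
template <Precision P>
void AllocBuffer(std::size_t n) {
  using T = typename PrecisionTrait<P>::ptype;
  static_assert(!std::is_void<T>::value, "unsupported precision");
  T *buf = new T[n];  // T is float when P == Precision::FP32
  delete[] buf;
}

int main() { AllocBuffer<Precision::FP32>(16); }
```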
......@@ -17,8 +17,5 @@ limitations under the License. */
#include "operators/conv_op.h"
namespace paddle_mobile {
namespace framework {
} // namespace framework
namespace framework {} // namespace framework
} // namespace paddle_mobile
......@@ -28,9 +28,5 @@ limitations under the License. */
#include "variable.h"
namespace paddle_mobile {
namespace framework {
} // namespace framework
namespace framework {} // namespace framework
} // namespace paddle_mobile
......@@ -18,9 +18,9 @@ limitations under the License. */
#include "common/enforce.h"
#include "common/log.h"
#include "framework/operator.h"
#include "framework/framework.pb.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/program/program_desc.h"
#include "framework/scope.h"
#include "framework/tensor.h"
......@@ -144,25 +144,26 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
std::make_shared<framework::Scope>();
program.scope = scope;
// originProgramDesc->Block(0);
// for (const auto &block : originProgramDesc->Blocks()) {
// for (int i = 0; i < block->Vars().size(); ++i) {
// std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
//// auto var = scope->Var(var_desc->Name());
// if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
// if (var_desc->Persistable() &&
// var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
// var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
// // auto tensor = var->GetMutable<framework::LoDTensor>();
// // to load
// // LoadVar(tensor, dirname + "/" + var_desc->Name());
// }
// } else {
// // TODO(codeWorm): some.
// }
// }
// }
// originProgramDesc->Block(0);
// for (const auto &block : originProgramDesc->Blocks()) {
// for (int i = 0; i < block->Vars().size(); ++i) {
// std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
//// auto var = scope->Var(var_desc->Name());
// if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
// if (var_desc->Persistable() &&
// var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH
// && var_desc->GetType() != framework::proto::VarType::FETCH_LIST)
// {
// // auto tensor = var->GetMutable<framework::LoDTensor>();
// // to load
// // LoadVar(tensor, dirname + "/" + var_desc->Name());
// }
// } else {
// // TODO(codeWorm): some.
// }
// }
// }
#ifdef PADDLE_MOBILE_DEBUG
for (const auto &block : program_desc_proto.blocks()) {
......@@ -323,7 +324,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
template class Loader<CPU, Precision::FP32>;
#pragma mark - executor
template <typename Dtype, Precision P>
......@@ -334,22 +334,26 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p) : program_(p) {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<framework::BlockDesc>> blocks = to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<framework::BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<framework::OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<framework::OpDesc> op = ops[j];
// auto op_base = framework::OpRegistry<Dtype>::CreateOp(op->Type(),
// op->GetInputs(), op->GetOutputs(), op->GetAttrMap(), program_.scope);
// op_base->InferShape();
}
}
InitMemory();
const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<framework::BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<framework::OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<framework::OpDesc> op = ops[j];
// auto op_base =
// framework::OpRegistry<Dtype>::CreateOp(op->Type(),
// op->GetInputs(), op->GetOutputs(),
// op->GetAttrMap(), program_.scope);
// op_base->InferShape();
}
}
InitMemory();
}
template <typename Dtype, Precision P>
void Executor<Dtype, P>::LoadMemory(framework::LoDTensor *tensor, const std::string &file_path){
void Executor<Dtype, P>::LoadMemory(framework::LoDTensor *tensor,
const std::string &file_path) {
std::ifstream is(file_path);
PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
file_path.c_str());
......@@ -444,7 +448,8 @@ void Executor<Dtype, P>::InitMemory() {
}
template <typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(framework::Tensor &t) {
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(
framework::Tensor &t) {
// feed
auto scope = program_.scope;
framework::Variable *g_feed_value = scope->Var("pixel");
......@@ -452,13 +457,15 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(framework::Tensor
tensor->ShareDataWith(t);
framework::Variable *con_output = scope->Var("conv2d_0.tmp_0");
framework::Tensor *output_tensor = con_output->GetMutable<framework::Tensor>();
framework::Tensor *output_tensor =
con_output->GetMutable<framework::Tensor>();
output_tensor->mutable_data<float>({1, 16, 32, 32});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims() <<
// std::endl;
std::shared_ptr<framework::Tensor> out_tensor = std::make_shared<framework::LoDTensor>();
std::shared_ptr<framework::Tensor> out_tensor =
std::make_shared<framework::LoDTensor>();
out_tensor.reset(output_tensor);
predict(t, 0);
......@@ -472,7 +479,7 @@ void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
feed_tensor->ShareDataWith(t);
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
op->Run();
......@@ -480,14 +487,14 @@ void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
}
template <typename Dtype, Precision P>
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(const std::vector<Ptype> &input, const std::vector<int64_t> &dims){
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
DLOG << "start predict: ";
framework::Tensor tensor;
auto ddim = framework::make_ddim(dims);
auto input_ptr = tensor.mutable_data<Ptype >(ddim);
auto input_ptr = tensor.mutable_data<Ptype>(ddim);
for (int i = 0; i < input.size(); ++i) {
input_ptr[i] = input[i];
}
......@@ -497,7 +504,6 @@ std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(cons
framework::Variable *g_feed_value = program_.scope->Var("col");
auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
return {};
}
......
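The Executor constructor above walks every block and every op of the program; the commented-out lines mark the intended next step of creating each operator through OpRegistry by its type string. A hypothetical minimal sketch of such a type-keyed factory, not paddle-mobile's actual OpRegistry:

```cpp
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct OperatorBase {
  virtual void InferShape() const = 0;
  virtual void Run() const = 0;
  virtual ~OperatorBase() = default;
};

// Maps an op type string ("conv2d", "pool2d", ...) to a factory function.
using OpCreator = std::function<std::shared_ptr<OperatorBase>()>;

std::map<std::string, OpCreator> &Registry() {
  static std::map<std::string, OpCreator> r;  // filled at registration time
  return r;
}

std::shared_ptr<OperatorBase> CreateOp(const std::string &type) {
  auto it = Registry().find(type);
  if (it == Registry().end()) throw std::runtime_error("unknown op: " + type);
  return it->second();  // construct the op; caller then calls InferShape()
}
```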
......@@ -14,17 +14,16 @@ limitations under the License. */
#pragma once
#include <memory.h>
#include <string>
#include <vector>
#include <memory.h>
#include "common/types.h"
#include "framework/tensor.h"
#include "framework/operator.h"
#include "framework/lod_tensor.h"
#include "framework/program/program.h"
#include "framework/operator.h"
#include "framework/paddle_mobile_object.h"
#include "framework/program/program.h"
#include "framework/tensor.h"
namespace paddle_mobile {
......@@ -48,7 +47,8 @@ class Executor {
std::shared_ptr<framework::Tensor> predict(framework::Tensor &t);
std::vector<Ptype> predict(const std::vector<Ptype> &input, const std::vector<int64_t> &dims);
std::vector<Ptype> predict(const std::vector<Ptype> &input,
const std::vector<int64_t> &dims);
protected:
void InitMemory();
......@@ -57,8 +57,8 @@ class Executor {
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
void predict(const framework::Tensor &t, int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<framework::OperatorBase<Dtype> >>>
ops_of_block_;
std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
};
......
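One note on the ops_of_block_ member reformatted above: it uses framework::BlockDesc itself as a std::map key, which only compiles because BlockDesc supplies a strict weak ordering. A sketch with hypothetical stand-in types to show the requirement:

```cpp
#include <map>
#include <memory>
#include <vector>

// Stand-ins for the framework types (illustrative only). std::map keys need
// operator<; ordering blocks by index is one plausible choice.
struct BlockDesc {
  int idx;
  bool operator<(const BlockDesc &other) const { return idx < other.idx; }
};

template <typename Dtype>
struct OperatorBase {};

template <typename Dtype>
using OpsOfBlock =
    std::map<BlockDesc, std::vector<std::shared_ptr<OperatorBase<Dtype>>>>;
```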
......@@ -24,14 +24,12 @@ template <typename DeviceType, typename T>
class FeedOp : framework::OperatorBase<DeviceType> {
public:
FeedOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
param_.Out()->ShareDataWith(*param_.InputX());
}
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void InferShape() const {
auto x_dims = param_.InputX()->dims();
......@@ -43,8 +41,8 @@ class FeedOp : framework::OperatorBase<DeviceType> {
};
namespace ops = paddle_mobile::operators;
//USE_OP(Feed);
//REGISTER_OPERATOR(Feed, ops::FeedOp);
// USE_OP(Feed);
// REGISTER_OPERATOR(Feed, ops::FeedOp);
}
}
} // namespace operators
} // namespace paddle_mobile
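FeedOp::Run() forwards its input to its output via ShareDataWith, i.e. the output tensor aliases the input's buffer rather than copying it (FetchOp below does the same). A minimal sketch of that aliasing semantics using a hypothetical tensor, not paddle-mobile's actual Tensor:

```cpp
#include <cassert>
#include <memory>

// Hypothetical minimal tensor: ShareDataWith re-points this tensor at the
// other tensor's buffer; no element is copied.
struct Tensor {
  std::shared_ptr<float> data;
  void ShareDataWith(const Tensor &other) { data = other.data; }
};

int main() {
  Tensor in, out;
  in.data = std::shared_ptr<float>(new float[4](),
                                   std::default_delete<float[]>());
  out.ShareDataWith(in);  // O(1) regardless of tensor size
  assert(out.data.get() == in.data.get());
  return 0;
}
```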
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
//
// Created by liuRuiLong on 2018/5/25.
//
......
......@@ -24,14 +24,12 @@ template <typename DeviceType, typename T>
class FetchOp : framework::OperatorBase<DeviceType> {
public:
FetchOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
param_.Out()->ShareDataWith(*param_.InputX());
}
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void InferShape() const {
auto x_dims = param_.InputX()->dims();
......@@ -43,8 +41,8 @@ class FetchOp : framework::OperatorBase<DeviceType> {
};
namespace ops = paddle_mobile::operators;
//USE_OP(Fetch);
//REGISTER_OPERATOR(Fetch, ops::FetchOp);
// USE_OP(Fetch);
// REGISTER_OPERATOR(Fetch, ops::FetchOp);
}
}
} // namespace operators
} // namespace paddle_mobile
......@@ -531,29 +531,30 @@ class SoftmaxParam : public OpParam {
class FeedParam : public OpParam {
public:
FeedParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
out_ = OutFrom<framework::Tensor>(outputs, scope);
}
const Tensor *InputX() const { return input_x_; }
Tensor *Out() const { return out_; }
private:
Tensor *input_x_;
Tensor *out_;
};
class FetchParam : public OpParam {
public:
FetchParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
out_ = OutFrom<framework::Tensor>(outputs, scope);
}
const Tensor *InputX() const { return input_x_; }
Tensor *Out() const { return out_; }
private:
Tensor *input_x_;
Tensor *out_;
......
......@@ -15,16 +15,16 @@ limitations under the License. */
#pragma once
#include <string>
#include "io.h"
#include "common/log.h"
#include "framework/executor.h"
#include "io.h"
#include "operators/conv_op.h"
#include "operators/pool_op.h"
#include "operators/softmax_op.h"
using paddle_mobile::Executor;
using paddle_mobile::framework::BlockDesc;
using paddle_mobile::framework::DDim;
using paddle_mobile::Executor;
using paddle_mobile::framework::LoDTensor;
using paddle_mobile::framework::OpDesc;
using paddle_mobile::framework::Program;
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "../test_include.h"
#include "framework/executor.h"
int main(){
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
// ../../../test/models/googlenet
// ../../../test/models/mobilenet
......@@ -30,7 +30,7 @@ int main(){
std::vector<int64_t> dims{1, 3, 224, 224};
GetInput<float>(g_test_image_1x3x224x224, &input, dims);
// DLOG << " input: " << input;
// DLOG << " input: " << input;
executor.predict(input, dims);
return 0;
......
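Assembled from the test fragments above, the end-to-end flow is: load a program from disk, build an executor over it, read a raw input blob, and run predict. The elided lines (model choice and executor construction) are filled in here as assumptions:

```cpp
#include <cstdint>
#include <vector>

#include "./test_helper.h"
#include "framework/executor.h"
#include "io.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  // Assumption: the elided lines pick one of the model paths listed above.
  auto program = loader.Load(g_mobilenet);
  paddle_mobile::Executor<paddle_mobile::CPU> executor(program);

  std::vector<float> input;
  std::vector<int64_t> dims{1, 3, 224, 224};
  GetInput<float>(g_test_image_1x3x224x224, &input, dims);

  executor.predict(input, dims);  // Ptype is float for the default FP32
  return 0;
}
```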
......@@ -14,20 +14,21 @@ limitations under the License. */
#pragma once
#include <random>
#include <fstream>
#include <random>
#include "common/log.h"
#include "framework/ddim.h"
#include "framework/tensor.h"
static const std::string g_google = "../models/googlenet";
static const std::string g_mobilenet = "../models/mobilenet";
static const std::string g_mobilenet_ssd = "../models/mobilenet";
static const std::string g_squeezenet = "../models/squeezenet";
static const std::string g_resnet = "../models/image_classification_resnet.inference.model";
static const std::string g_test_image_1x3x224x224 = "../images/test_image_1x3x224x224_float";
static const std::string g_resnet =
"../models/image_classification_resnet.inference.model";
static const std::string g_test_image_1x3x224x224 =
"../images/test_image_1x3x224x224_float";
template <typename T>
void SetupTensor(paddle_mobile::framework::Tensor *input,
......@@ -43,7 +44,8 @@ void SetupTensor(paddle_mobile::framework::Tensor *input,
}
template <typename T>
void GetInput(const std::string &input_name, std::vector<T> *input, const std::vector<int64_t> &dims){
void GetInput(const std::string &input_name, std::vector<T> *input,
const std::vector<int64_t> &dims) {
int size = 1;
for (const auto &dim : dims) {
size *= dim;
......
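The GetInput body is truncated above; given the size computation over dims, a plausible completion reads that many raw values of T from the file. A hedged sketch, assuming the input file stores the elements back to back with no header:

```cpp
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

template <typename T>
void GetInput(const std::string &input_name, std::vector<T> *input,
              const std::vector<int64_t> &dims) {
  int64_t size = 1;
  for (const auto &dim : dims) {
    size *= dim;  // total element count is the product of the dims
  }
  input->resize(size);
  std::ifstream in(input_name, std::ios::in | std::ios::binary);
  in.read(reinterpret_cast<char *>(input->data()), size * sizeof(T));
}
```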
......@@ -18,15 +18,15 @@ limitations under the License. */
#include <string>
#include <vector>
#include "io.h"
#include "common/log.h"
#include "./test_helper.h"
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
#include "framework/operator.h"
#include "framework/lod_tensor.h"
#include "common/log.h"
#include "framework/framework.pb.h"
#include "framework/program/program.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/program/block_desc.h"
#include "framework/program/program.h"
#include "framework/program/program_desc.h"
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
#include "io.h"