Commit cd63d8a2 authored by eclipsess

conflict

.gitignore

@@ -58,6 +58,8 @@ cmake-build-release/
 test/models/
+test/images/
 # Emacs intermediate files
 *~
CMakeLists.txt

@@ -40,7 +40,7 @@ endif ()
 #add_dependencies(paddle-mobile openblas_proj)
 # gen static
-ADD_LIBRARY(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
+ADD_LIBRARY(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H} src/operators/feed_op.cpp src/operators/feed_op.h src/operators/fetch_op.cpp src/operators/fetch_op.h)
 if (ANDROID)
 # openblas.a need log lib
 target_link_libraries(paddle-mobile protobuf-lite)
src/common/log.h

@@ -170,11 +170,16 @@ struct ToLog {
 template <typename T>
 Print &operator<<(Print &printer, const std::vector<T> &v) {
-  printer << "[ ";
-  for (const auto &value : v) {
+  printer << "[\n ";
+  for (int i = 0; i < v.size(); ++i) {
+    const auto &value = v[i];
     printer << value << " ";
+    if (i % 10 == 9) {
+      printer << "\n";
+    }
   }
-  printer << " ]";
+  printer << " \n]";
   return printer;
 }
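Reviewer note: the printer now breaks vector output into rows of ten values
instead of one long line. A minimal sketch of the effect (hypothetical values,
using the DLOG macro this header provides):

    // Hypothetical usage of the new operator<< for std::vector.
    std::vector<int> v = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
    DLOG << v;
    // Output is now roughly:
    // [
    //  0 1 2 3 4 5 6 7 8 9
    // 10 11
    // ]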
src/common/types.h

@@ -17,6 +17,16 @@ limitations under the License. */
 namespace paddle_mobile {
 enum class Precision : int { FP32 = 0 };
+
+template <Precision p>
+struct PrecisionTrait {
+  typedef void ptype;
+};
+
+template <>
+struct PrecisionTrait<Precision::FP32> {
+  typedef float ptype;
+};
 //! device type
 enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
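Reviewer note: PrecisionTrait is a compile-time map from a Precision enum
value to its element type; the Executor added below uses it to derive its
Ptype typedef. A minimal usage sketch (the static_assert is illustrative):

    #include <type_traits>
    using paddle_mobile::Precision;
    using paddle_mobile::PrecisionTrait;

    // FP32 resolves to float; unspecialized precisions fall back to void,
    // so trying to use them as a data type fails to compile.
    static_assert(
        std::is_same<PrecisionTrait<Precision::FP32>::ptype, float>::value,
        "FP32 maps to float");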
src/framework/executor.cpp

@@ -17,73 +17,5 @@ limitations under the License. */
 #include "operators/conv_op.h"
 namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-Executor<Dtype>::Executor(const Program<Dtype> p) : program_(p) {
-  if (use_optimize_) {
-    to_predict_program_ = program_.optimizeProgram;
-  } else {
-    to_predict_program_ = program_.originProgram;
-  }
-  //  const std::vector<std::shared_ptr<BlockDesc>> blocks =
-  to_predict_program_->Blocks();
-  //  for (int i = 0; i < blocks.size(); ++i) {
-  //    std::shared_ptr<BlockDesc> block_desc = blocks[i];
-  //    std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-  //    for (int j = 0; j < ops.size(); ++j) {
-  //      std::shared_ptr<OpDesc> op = ops[j];
-  //      if (op->Type() == "conv2d" && op->Input("Input")[0] ==
-  //      "pixel") {
-  //        Attribute strides_attr = op->GetAttrMap().at("strides");
-  //        std::vector<int> stride =
-  //        strides_attr.Get<std::vector<int>>(); for (int k = 0; k <
-  //        stride.size(); ++k) {
-  //        }
-  //        std::shared_ptr<operators::ConvOp<Dtype, float>> conv =
-  //            std::make_shared<operators::ConvOp<Dtype, float>>(
-  //                op->Type(), op->GetInputs(), op->GetOutputs(),
-  //                op->GetAttrMap(), program_.scope);
-  //        ops_of_block_[*block_desc.get()].push_back(conv);
-  //      }
-  //    }
-  //  }
-}
-
-template <typename Dtype>
-std::shared_ptr<Tensor> Executor<Dtype>::predict(Tensor &t) {
-  // feed
-  auto scope = program_.scope;
-  Variable *g_feed_value = scope->Var("pixel");
-  auto tensor = g_feed_value->GetMutable<Tensor>();
-  tensor->ShareDataWith(t);
-
-  Variable *con_output = scope->Var("conv2d_0.tmp_0");
-  Tensor *output_tensor = con_output->GetMutable<Tensor>();
-  output_tensor->mutable_data<float>({1, 16, 32, 32});
-  //  std::cout << typeid(output_tensor).name() << std::endl;
-  //  std::cout << "output_tensor dims: " << output_tensor->dims() <<
-  //  std::endl;
-
-  std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
-  out_tensor.reset(output_tensor);
-
-  predict(t, 0);
-  return out_tensor;
-}
-
-template <typename Dtype>
-void Executor<Dtype>::predict(const Tensor &t, int block_id) {
-  std::shared_ptr<BlockDesc> to_predict_block =
-      to_predict_program_->Block(block_id);
-  for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
-    auto op = ops_of_block_[*to_predict_block.get()][j];
-    op->Run();
-  }
-}
-
-template class Executor<CPU>;
-}  // namespace framework
+namespace framework {}  // namespace framework
 }  // namespace paddle_mobile
src/framework/executor.h

@@ -28,28 +28,5 @@ limitations under the License. */
 #include "variable.h"
 namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-class Executor {
- public:
-  Executor() = default;
-  Executor(const Program<Dtype> p);
-  std::shared_ptr<Tensor> predict(Tensor &t);
-
- public:
-  const framework::Program<Dtype> program_;
-  std::shared_ptr<ProgramDesc> to_predict_program_;
-  void predict(const Tensor &t, int block_id);
-  std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
-      ops_of_block_;
-  bool use_optimize_ = false;
-};
-
-}  // namespace framework
+namespace framework {}  // namespace framework
 }  // namespace paddle_mobile
src/framework/program/program.h

@@ -28,6 +28,7 @@ class Program : PaddleMobileObject {
   std::shared_ptr<ProgramDesc> originProgram;
   std::shared_ptr<ProgramDesc> optimizeProgram;
   std::shared_ptr<Scope> scope;
+  std::string model_path;

  private:
 };
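Reviewer note: Program now remembers the directory it was loaded from. The
new Executor::InitMemory in io.cpp below depends on this, rebuilding each
parameter file path as program_.model_path + "/" + var_desc->Name(), which is
why Load now sets program.model_path = dirname in the hunk after the includes.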
src/io.cpp

@@ -20,6 +20,7 @@ limitations under the License. */
 #include "common/log.h"
 #include "framework/framework.pb.h"
 #include "framework/lod_tensor.h"
+#include "framework/operator.h"
 #include "framework/program/program_desc.h"
 #include "framework/scope.h"
 #include "framework/tensor.h"
@@ -136,31 +137,33 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
       std::make_shared<framework::ProgramDesc>(program_desc_proto);

   framework::Program<Dtype, P> program;
+  program.model_path = dirname;
   program.originProgram = originProgramDesc;

   std::shared_ptr<framework::Scope> scope =
       std::make_shared<framework::Scope>();
   program.scope = scope;
-  originProgramDesc->Block(0);
-
-  for (const auto &block : originProgramDesc->Blocks()) {
-    for (int i = 0; i < block->Vars().size(); ++i) {
-      std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
-      auto var = scope->Var(var_desc->Name());
-      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
-        if (var_desc->Persistable() &&
-            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
-            var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          // to load
-          LoadVar(tensor, dirname + "/" + var_desc->Name());
-        }
-      } else {
-        // TODO(codeWorm): some.
-      }
-    }
-  }
+  //  originProgramDesc->Block(0);
+
+  //  for (const auto &block : originProgramDesc->Blocks()) {
+  //    for (int i = 0; i < block->Vars().size(); ++i) {
+  //      std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
+  ////      auto var = scope->Var(var_desc->Name());
+  //      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+  //        if (var_desc->Persistable() &&
+  //            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH
+  //            && var_desc->GetType() != framework::proto::VarType::FETCH_LIST)
+  //        {
+  //          //          auto tensor = var->GetMutable<framework::LoDTensor>();
+  //          // to load
+  //          //          LoadVar(tensor, dirname + "/" + var_desc->Name());
+  //        }
+  //      } else {
+  //        // TODO(codeWorm): some.
+  //      }
+  //    }
+  //  }

 #ifdef PADDLE_MOBILE_DEBUG
   for (const auto &block : program_desc_proto.blocks()) {
@@ -321,4 +324,189 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 template class Loader<CPU, Precision::FP32>;
+
+#pragma mark - executor
+
+template <typename Dtype, Precision P>
+Executor<Dtype, P>::Executor(const framework::Program<Dtype> p) : program_(p) {
+  if (use_optimize_) {
+    to_predict_program_ = program_.optimizeProgram;
+  } else {
+    to_predict_program_ = program_.originProgram;
+  }
+  const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
+      to_predict_program_->Blocks();
+  for (int i = 0; i < blocks.size(); ++i) {
+    std::shared_ptr<framework::BlockDesc> block_desc = blocks[i];
+    std::vector<std::shared_ptr<framework::OpDesc>> ops = block_desc->Ops();
+    for (int j = 0; j < ops.size(); ++j) {
+      std::shared_ptr<framework::OpDesc> op = ops[j];
+      //      auto op_base =
+      //          framework::OpRegistry<Dtype>::CreateOp(op->Type(),
+      //          op->GetInputs(), op->GetOutputs(),
+      //          op->GetAttrMap(), program_.scope);
+      //      op_base->InferShape();
+    }
+  }
+  InitMemory();
+}
+
+template <typename Dtype, Precision P>
+void Executor<Dtype, P>::LoadMemory(framework::LoDTensor *tensor,
+                                    const std::string &file_path) {
+  std::ifstream is(file_path);
+  PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
+                        file_path.c_str());
+  std::fpos<mbstate_t> pos;
+  pos = is.tellg();  // save current position
+  is.seekg(0, std::ios::end);
+  is.seekg(pos);  // restore saved position
+
+  // 1. version
+  uint32_t version;
+  is.read(reinterpret_cast<char *>(&version), sizeof(version));
+
+  // 2. Lod information
+  uint64_t lod_level;
+  is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
+  auto &lod = *tensor->mutable_lod();
+  lod.resize(lod_level);
+  for (uint64_t i = 0; i < lod_level; ++i) {
+    uint64_t size;
+    is.read(reinterpret_cast<char *>(&size), sizeof(size));
+    std::vector<size_t> tmp(size / sizeof(size_t));
+    is.read(reinterpret_cast<char *>(tmp.data()),
+            static_cast<std::streamsize>(size));
+    for (auto j : tmp) {
+      LOG(kLOG_DEBUG1) << " lod - " << j;
+    }
+    lod[i] = tmp;
+  }
+
+  // 3. tensor version
+  uint32_t tensor_version;
+  is.read(reinterpret_cast<char *>(&tensor_version), sizeof(tensor_version));
+
+  // 4. tensor desc
+  int32_t size;
+  is.read(reinterpret_cast<char *>(&size), sizeof(size));
+  std::unique_ptr<char[]> buf(new char[size]);
+  is.read(reinterpret_cast<char *>(buf.get()), size);
+
+  framework::proto::VarType::TensorDesc desc;
+  desc.ParseFromArray(buf.get(), size);
+
+  int memory_size = 1;
+  for (auto l : desc.dims()) {
+    memory_size *= l;
+  }
+
+  std::vector<int64_t> dims;
+  dims.reserve(static_cast<size_t>(desc.dims().size()));
+  std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
+  tensor->Resize(framework::make_ddim(dims));
+
+  void *memory = tensor;
+  int type_size = 0;
+  switch (desc.data_type()) {
+    case framework::proto::VarType::FP16:
+      type_size = 2;
+      break;
+    case framework::proto::VarType::FP32:
+      type_size = 4;
+      memory = tensor->mutable_data<float>();
+      break;
+    case framework::proto::VarType::FP64:
+      type_size = 8;
+      break;
+    case framework::proto::VarType::INT32:
+      type_size = 4;
+      break;
+    case framework::proto::VarType::INT64:
+      type_size = 8;
+      break;
+    case framework::proto::VarType::BOOL:
+      type_size = 1;
+      break;
+    default:
+      break;
+  }
+
+  is.read(static_cast<char *>(memory), memory_size * type_size);
+  is.close();
+};
+
+template <typename Dtype, Precision P>
+void Executor<Dtype, P>::InitMemory() {
+  for (const auto &block : to_predict_program_->Blocks()) {
+    for (const auto &var_desc : block->Vars()) {
+      auto var = program_.scope->Var(var_desc->Name());
+      auto tensor = var->template GetMutable<framework::LoDTensor>();
+      LoadMemory(tensor, program_.model_path + "/" + var_desc->Name());
+    }
+  }
+}
+
+template <typename Dtype, Precision P>
+std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(
+    framework::Tensor &t) {
+  // feed
+  auto scope = program_.scope;
+  framework::Variable *g_feed_value = scope->Var("pixel");
+  auto tensor = g_feed_value->GetMutable<framework::Tensor>();
+  tensor->ShareDataWith(t);
+
+  framework::Variable *con_output = scope->Var("conv2d_0.tmp_0");
+  framework::Tensor *output_tensor =
+      con_output->GetMutable<framework::Tensor>();
+  output_tensor->mutable_data<float>({1, 16, 32, 32});
+  //  std::cout << typeid(output_tensor).name() << std::endl;
+  //  std::cout << "output_tensor dims: " << output_tensor->dims() <<
+  //  std::endl;
+
+  std::shared_ptr<framework::Tensor> out_tensor =
+      std::make_shared<framework::LoDTensor>();
+  out_tensor.reset(output_tensor);
+
+  predict(t, 0);
+  return out_tensor;
+}
+
+template <typename Dtype, Precision P>
+void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
+  framework::Variable *g_feed_value = program_.scope->Var("feed");
+  auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
+  feed_tensor->ShareDataWith(t);
+
+  std::shared_ptr<framework::BlockDesc> to_predict_block =
+      to_predict_program_->Block(block_id);
+  for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
+    auto op = ops_of_block_[*to_predict_block.get()][j];
+    op->Run();
+  }
+}
+
+template <typename Dtype, Precision P>
+std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(
+    const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
+  DLOG << "start predict: ";
+
+  framework::Tensor tensor;
+  auto ddim = framework::make_ddim(dims);
+
+  auto input_ptr = tensor.mutable_data<Ptype>(ddim);
+  for (int i = 0; i < input.size(); ++i) {
+    input_ptr[i] = input[i];
+  }
+
+  predict(tensor, 0);
+
+  framework::Variable *g_feed_value = program_.scope->Var("col");
+  auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
+
+  return {};
+}
+
+template class Executor<CPU, Precision::FP32>;
+
 }  // namespace paddle_mobile
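Reviewer note: LoadMemory hand-parses one parameter file per variable. The
on-disk layout implied by the reads above is, as a sketch (field names are
mine, inferred from the code rather than from a spec):

    // Layout of a single parameter file as consumed by LoadMemory:
    //   uint32_t version;          // 1. file format version
    //   uint64_t lod_level;        // 2. number of LoD levels, then per level:
    //     uint64_t size;           //    byte size of the level
    //     size_t   entries[size / sizeof(size_t)];
    //   uint32_t tensor_version;   // 3. tensor format version
    //   int32_t  desc_size;        // 4. length of the serialized
    //   char     desc[desc_size];  //    proto::VarType::TensorDesc
    //   char     data[numel * type_size];  // 5. raw tensor contents

Also worth flagging: only the FP32 case repoints memory at
tensor->mutable_data<float>(). For every other data type the final is.read
writes into the Tensor object itself (void *memory = tensor;), which looks
unintended for anything but FP32 models.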
src/io.h

@@ -14,12 +14,16 @@ limitations under the License. */
 #pragma once

+#include <memory.h>
 #include <string>
+#include <vector>

 #include "common/types.h"
 #include "framework/lod_tensor.h"
+#include "framework/operator.h"
 #include "framework/paddle_mobile_object.h"
 #include "framework/program/program.h"
+#include "framework/tensor.h"

 namespace paddle_mobile {

@@ -32,4 +36,30 @@ class Loader : PaddleMobileObject {
   void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
 };

+template <typename Dtype, Precision P = Precision::FP32>
+class Executor {
+ public:
+  typedef typename PrecisionTrait<P>::ptype Ptype;
+
+  Executor() = default;
+
+  Executor(const framework::Program<Dtype> p);
+
+  std::shared_ptr<framework::Tensor> predict(framework::Tensor &t);
+
+  std::vector<Ptype> predict(const std::vector<Ptype> &input,
+                             const std::vector<int64_t> &dims);
+
+ protected:
+  void InitMemory();
+  void LoadMemory(framework::LoDTensor *tensor, const std::string &file_path);
+
+  const framework::Program<Dtype> program_;
+  std::shared_ptr<framework::ProgramDesc> to_predict_program_;
+
+  void predict(const framework::Tensor &t, int block_id);
+
+  std::map<framework::BlockDesc,
+           std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
+      ops_of_block_;
+  bool use_optimize_ = false;
+};
+
 }  // namespace paddle_mobile
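Reviewer note: with this move, Executor lives in the paddle_mobile namespace
next to Loader and gains a std::vector-based predict overload typed via
PrecisionTrait. The intended call sequence, as exercised by
test/net/test-googlenet.cpp below (paths are the test's own assumptions):

    paddle_mobile::Loader<paddle_mobile::CPU> loader;
    auto program = loader.Load(std::string("../models/googlenet"));
    paddle_mobile::Executor<paddle_mobile::CPU> executor(program);

    std::vector<float> input(1 * 3 * 224 * 224, 0.f);  // hypothetical input
    auto output = executor.predict(input, {1, 3, 224, 224});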
src/operators/feed_op.cpp (new file)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "feed_op.h"
src/operators/feed_op.h (new file)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "framework/operator.h"
#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class FeedOp : framework::OperatorBase<DeviceType> {
 public:
  FeedOp(const std::string &type, const VariableNameMap &inputs,
         const VariableNameMap &outputs, const framework::AttributeMap attrs,
         std::shared_ptr<framework::Scope> scope)
      : framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
                                            scope),
        param_(inputs, outputs, attrs, *scope) {}
  void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); }

  void InferShape() const {
    auto x_dims = param_.InputX()->dims();
    param_.Out()->Resize(x_dims);
  }

 protected:
  FeedParam param_;
};

namespace ops = paddle_mobile::operators;
// USE_OP(Feed);
// REGISTER_OPERATOR(Feed, ops::FeedOp);

}  // namespace operators
}  // namespace paddle_mobile
src/operators/fetch_op.cpp (new file)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

//
//  Created by liuRuiLong on 2018/5/25.
//

#include "fetch_op.h"
src/operators/fetch_op.h (new file)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "framework/operator.h"
#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class FetchOp : framework::OperatorBase<DeviceType> {
 public:
  FetchOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
                                            scope),
        param_(inputs, outputs, attrs, *scope) {}
  void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); }

  void InferShape() const {
    auto x_dims = param_.InputX()->dims();
    param_.Out()->Resize(x_dims);
  }

 protected:
  FetchParam param_;
};

namespace ops = paddle_mobile::operators;
// USE_OP(Fetch);
// REGISTER_OPERATOR(Fetch, ops::FetchOp);

}  // namespace operators
}  // namespace paddle_mobile
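Reviewer note: FeedOp and FetchOp are mirror images. Run() aliases the output
tensor to the input via ShareDataWith, so feeding and fetching copy no data,
and InferShape just forwards the input dims. Both inherit privately from
OperatorBase, and registration (USE_OP / REGISTER_OPERATOR) is still
commented out, so nothing instantiates them from a ProgramDesc yet.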
src/operators/op_param.h

@@ -588,6 +588,38 @@ class MultiClassNMSParam : public OpParam {
   float score_threshold_;
 };

+class FeedParam : public OpParam {
+ public:
+  FeedParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+            const framework::AttributeMap &attrs,
+            const framework::Scope &scope) {
+    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
+    out_ = OutFrom<framework::Tensor>(outputs, scope);
+  }
+  const Tensor *InputX() const { return input_x_; }
+  Tensor *Out() const { return out_; }
+
+ private:
+  Tensor *input_x_;
+  Tensor *out_;
+};
+
+class FetchParam : public OpParam {
+ public:
+  FetchParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+             const framework::AttributeMap &attrs,
+             const framework::Scope &scope) {
+    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
+    out_ = OutFrom<framework::Tensor>(outputs, scope);
+  }
+  const Tensor *InputX() const { return input_x_; }
+  Tensor *Out() const { return out_; }
+
+ private:
+  Tensor *input_x_;
+  Tensor *out_;
+};
+
 class TransposeParam : public OpParam {
  public:
   TransposeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,

@@ -609,7 +641,7 @@ class TransposeParam {
   vector<int> axis_;
 };

 class ReshapeParam : public OpParam {
  public:
   ReshapeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                const AttributeMap &attrs, const Scope &scope) {

@@ -628,13 +660,14 @@ class TransposeParam {
   const vector<int> &Shape() const { return shape_; }

-  const bool &Inplace() const {return inplace_; }
+  const bool &Inplace() const { return inplace_; }

  private:
   Tensor *input_x_;
   Tensor *input_shape_;
   Tensor *out_;
   vector<int> shape_;
   bool inplace_;
 };

 }  // namespace operators
 }  // namespace paddle_mobile
test/CMakeLists.txt

@@ -75,3 +75,7 @@ target_link_libraries(test-gemm paddle-mobile)
 # gen test
 ADD_EXECUTABLE(test-enforce common/test_enforce.cpp)
 target_link_libraries(test-enforce paddle-mobile)
+
+# gen test
+ADD_EXECUTABLE(test-googlenet net/test-googlenet.cpp test_helper.h test_include.h executor_for_test.h)
+target_link_libraries(test-googlenet paddle-mobile)
test/executor_for_test.h

@@ -18,15 +18,16 @@ limitations under the License. */
 #include <vector>

 #include "common/log.h"
 #include "framework/executor.h"
+#include "io.h"
 #include "operators/conv_op.h"
 #include "operators/pool_op.h"
 #include "operators/softmax_op.h"
 #include "operators/transpose_op.h"
 #include "operators/reshape_op.h"

+using paddle_mobile::Executor;
 using paddle_mobile::framework::BlockDesc;
 using paddle_mobile::framework::DDim;
-using paddle_mobile::framework::Executor;
 using paddle_mobile::framework::LoDTensor;
 using paddle_mobile::framework::OpDesc;
 using paddle_mobile::framework::Program;
test/net/test-googlenet.cpp (new file)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <fstream>
#include "../test_helper.h"
#include "../test_include.h"
#include "framework/executor.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  //  ../../../test/models/googlenet
  //  ../../../test/models/mobilenet
  auto program = loader.Load(std::string("../models/googlenet"));
  paddle_mobile::Executor<paddle_mobile::CPU> executor(program);

  std::vector<float> input;
  std::vector<int64_t> dims{1, 3, 224, 224};
  GetInput<float>(g_test_image_1x3x224x224, &input, dims);
  //  DLOG << " input: " << input;
  executor.predict(input, dims);

  return 0;
}
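Reviewer note: predict(input, dims) currently ends with return {}; in io.cpp
above, and the "col" variable it fetches is unused, so this test only
exercises the load-and-forward path; it cannot check outputs yet.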
test/test_helper.h

@@ -14,11 +14,22 @@ limitations under the License. */
 #pragma once

+#include <fstream>
 #include <random>

 #include "common/log.h"
 #include "framework/ddim.h"
 #include "framework/tensor.h"

+static const std::string g_google = "../models/googlenet";
+static const std::string g_mobilenet = "../models/mobilenet";
+static const std::string g_mobilenet_ssd = "../models/mobilenet";
+static const std::string g_squeezenet = "../models/squeezenet";
+static const std::string g_resnet =
+    "../models/image_classification_resnet.inference.model";
+static const std::string g_test_image_1x3x224x224 =
+    "../images/test_image_1x3x224x224_float";
+
 template <typename T>
 void SetupTensor(paddle_mobile::framework::Tensor *input,
                  paddle_mobile::framework::DDim dims, T lower, T upper) {

@@ -31,3 +42,21 @@ void SetupTensor(paddle_mobile::framework::Tensor *input,
     input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
   }
 }
+
+template <typename T>
+void GetInput(const std::string &input_name, std::vector<T> *input,
+              const std::vector<int64_t> &dims) {
+  int size = 1;
+  for (const auto &dim : dims) {
+    size *= dim;
+  }
+
+  T *input_ptr = (T *)malloc(sizeof(T) * size);
+  std::ifstream in(input_name, std::ios::in | std::ios::binary);
+  in.read((char *)(input_ptr), size * sizeof(T));
+  in.close();
+  for (int i = 0; i < size; ++i) {
+    input->push_back(input_ptr[i]);
+  }
+  free(input_ptr);
+}
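Reviewer note: the new helpers assume the test binary runs next to models/
and images/ directories (the same paths just added to .gitignore). A minimal
sketch of calling GetInput, assuming the binary image file exists at that
relative path:

    // Reads size * sizeof(float) raw bytes into `input`.
    std::vector<float> input;
    std::vector<int64_t> dims{1, 3, 224, 224};
    GetInput<float>(g_test_image_1x3x224x224, &input, dims);
    // On success input.size() == 1 * 3 * 224 * 224. Note GetInput does not
    // check that the stream opened or that the read succeeded.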
test/test_include.h

@@ -19,6 +19,7 @@ limitations under the License. */
 #include <vector>

 #include "./test_helper.h"
+#include "common/log.h"
 #include "framework/framework.pb.h"
 #include "framework/lod_tensor.h"
 #include "framework/operator.h"