Unverified commit c73a4302 authored by W WangLiu, committed by GitHub

Merge pull request #314 from cocodark/develop

Modify the operator interface to support printing output tensors
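The core of this change: `OperatorBase::Run()` stops being pure virtual; concrete operators now override `RunImpl()`, and `Run()` wraps it so that, when `PADDLE_MOBILE_DEBUG` is defined, each output named in `op_input_output_key` is looked up in the scope and logged. A minimal, compilable sketch of that pattern follows — `Tensor`, `OutputTensors()`, and the stream printer are simplified stand-ins for illustration, not the real framework API:

// Sketch of the template-method pattern this PR introduces (assumed
// simplifications; the real code resolves outputs via GetVarValue/Scope).
#include <iostream>
#include <vector>

struct Tensor {
  std::vector<float> data;
};

// stand-in for the Print/DLOG machinery added in tensor.h
inline std::ostream &operator<<(std::ostream &os, const Tensor &t) {
  for (float v : t.data) os << v << ' ';
  return os;
}

class OperatorBase {
 public:
  virtual ~OperatorBase() = default;
  // non-virtual entry point: run the kernel, then dump outputs in debug builds
  void Run() const {
    RunImpl();
#ifdef PADDLE_MOBILE_DEBUG
    for (const Tensor *out : OutputTensors()) {
      if (out != nullptr) std::cout << *out << "\n";
    }
#endif
  }

 protected:
  virtual void RunImpl() const = 0;  // what each concrete operator overrides
  // hypothetical accessor; the diff looks outputs up by key in the Scope
  virtual std::vector<const Tensor *> OutputTensors() const { return {}; }
};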
......@@ -13,11 +13,32 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "framework/operator.h"
#include "framework/op_info.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
vector<string> OperatorBase<Dtype>::GetOutKeys() const {
auto it = op_input_output_key.find(type_);
if (it == op_input_output_key.end()) {
  DLOG << type_ << " has no outputs";
  return {};  // avoid dereferencing the end iterator
}
return it->second.second;
}
template <typename T>
static T *GetVarValue(const string &key, const VariableNameMap &var_map,
const Scope &scope) {
auto var_vec = var_map.at(key);
if (!var_vec.empty()) {
auto var = scope.FindVar(var_vec[0]);
return var->GetMutable<T>();
} else {
return nullptr;
}
}
template <typename Dtype>
OperatorBase<Dtype>::OperatorBase(const std::string &type,
const VariableNameMap &inputs,
......@@ -31,9 +52,22 @@ OperatorBase<Dtype>::OperatorBase(const std::string &type,
scope_(scope) {
CheckAllInputOutputSet();
}
template <typename Dtype>
void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
template <typename Dtype>
void OperatorBase<Dtype>::Run() const {
RunImpl();
#ifdef PADDLE_MOBILE_DEBUG
vector<string> output_keys = GetOutKeys();
for (const auto &key : output_keys) {
  Tensor *out = GetVarValue<framework::LoDTensor>(key, outputs_, *scope_);
  if (out != nullptr) {
    DLOG << type_ << " output- " << key << "=" << *out;
  }
}
#endif
}
template class OperatorBase<CPU>;
template class OperatorWithKernel<CPU>;
......
......@@ -36,6 +36,8 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
using std::string;
using std::vector;
static std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
op_input_output_key = {{"conv2d", {{"Input"}, {"Output"}}},
......@@ -57,7 +59,9 @@ class OperatorBase : PaddleMobileObject {
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope);
virtual ~OperatorBase() {}
virtual void Run() const = 0;
void Run() const;
vector<string> GetOutKeys() const;
virtual void RunImpl() const = 0;
virtual void InferShape() const = 0;
const VariableNameMap &Inputs() const { return inputs_; }
......@@ -88,7 +92,8 @@ class OperatorWithKernel : public OperatorBase<Dtype> {
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
virtual void Run() const = 0;
virtual void RunImpl() const = 0;
virtual void InferShape() const = 0;
};
......
......@@ -18,11 +18,12 @@ limitations under the License. */
#include <cstdint>
#include <cstring>
#include <memory>
#include <type_traits>
#include <typeindex>
#include <vector>
#include "data_layout.h"
#include "ddim.h"
#include "framework/data_layout.h"
#include "framework/ddim.h"
#include "memory/t_malloc.h"
namespace paddle_mobile {
......@@ -62,8 +63,8 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> {
static inline size_t SizeOfType(std::type_index type) {
SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor;
size_t size = functor(type);
// PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s",
// type.name());
PADDLE_MOBILE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
return size;
}
......@@ -72,16 +73,27 @@ class LoDTensor;
class Tensor {
public:
Tensor() : offset_(0) {}
template <typename T>
Tensor(std::vector<T> input, DDim ddim) : offset_(0) {
PADDLE_MOBILE_ENFORCE(
input.size() == framework::product(ddim),
"input vector'length should be equal to tensor's length");
auto input_ptr = mutable_data<T>(ddim);
for (size_t i = 0; i < input.size(); ++i) {
input_ptr[i] = input[i];
}
}
/*! Return a pointer to mutable memory block. */
template <typename T>
inline T *data() {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() ==
// typeid(T).hash_code(),
// "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name());
PADDLE_MOBILE_ENFORCE(
(std::is_same<T, void>::value ||
holder_->type().hash_code() == typeid(T).hash_code()),
"Tensor holds the wrong type, it holds %s",
this->holder_->type().name());
return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
offset_);
}
......@@ -90,11 +102,11 @@ class Tensor {
template <typename T>
inline const T *data() const {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() ==
// typeid(T).hash_code(),
// "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name());
PADDLE_MOBILE_ENFORCE(
(std::is_same<T, void>::value ||
holder_->type().hash_code() == typeid(T).hash_code()),
"Tensor holds the wrong type, it holds %s",
this->holder_->type().name());
return reinterpret_cast<const T *>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
......@@ -116,17 +128,11 @@ class Tensor {
if (holder_ != nullptr) {
holder_->set_type(type);
}
// PADDLE_ENFORCE_GE(numel(), 0,
// "When calling this method, the Tensor's
// numel must be
// " "equal or larger than zero. " "Please
// check
// Tensor::Resize has been called first.");
PADDLE_MOBILE_ENFORCE(numel() >= 0, "the Tensor's numel must be >= 0.")
int64_t size = numel() * SizeOfType(type);
/* some versions of boost::variant don't have operator!= */
if (holder_ == nullptr || holder_->size() < size + offset_) {
holder_.reset(new PlaceholderImpl(size, type));
offset_ = 0;
}
return reinterpret_cast<void *>(
......@@ -179,16 +185,13 @@ class Tensor {
*/
inline Tensor Slice(int begin_idx, int end_idx) const {
check_memory_size();
// PADDLE_ENFORCE_GE(begin_idx, 0,
// "The start row index must be greater than
// 0.");
// PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is
// out of
// bound."); PADDLE_ENFORCE_LT(
// begin_idx, end_idx,
// "The start row index must be lesser than the end row
// index.");
PADDLE_MOBILE_ENFORCE(begin_idx >= 0,
                      "The start row index must be non-negative.")
PADDLE_MOBILE_ENFORCE(end_idx <= dims_[0],
                      "The end row index is out of bound.")
PADDLE_MOBILE_ENFORCE(
    begin_idx < end_idx,
    "The start row index must be less than the end row index.")
if (dims_[0] == 1) {
return *this;
} else {
......@@ -205,10 +208,9 @@ class Tensor {
}
std::type_index type() const {
// PADDLE_ENFORCE_NOT_NULL(
// holder_, "Tensor not initialized yet
// when
// Tensor::type() is called.");
PADDLE_MOBILE_ENFORCE(
holder_ != nullptr,
"Tensor not initialized yet when Tensor::type() is called.")
return holder_->type();
}
......@@ -221,12 +223,8 @@ class Tensor {
PADDLE_MOBILE_ENFORCE(
holder_ != nullptr,
"Tensor holds no memory. Call Tensor::mutable_data first.");
PADDLE_MOBILE_ENFORCE(
numel() * SizeOfType(type()) <= memory_size(),
"Tensor's dims_ is out of bound. CallTensor::mutable_data "
"first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data\
already stored.");
PADDLE_MOBILE_ENFORCE(numel() * SizeOfType(type()) <= memory_size(),
                      "Tensor's dims_ is out of bound.");
}
inline DataLayout layout() const { return layout_; }
......@@ -257,13 +255,8 @@ class Tensor {
memory::PODDeleter<uint8_t>()),
size_(size),
type_(type) {
// PADDLE_ENFORCE_NOT_NULL(ptr_,
// "Insufficient %s
// memory to allocation.",
// (is_cpu_place(place_)
// ?
// "CPU" :
// "GPU"));
PADDLE_MOBILE_ENFORCE(ptr_ != nullptr,
                      "Insufficient memory for allocation.");
}
virtual size_t size() const { return size_; }
......@@ -321,6 +314,19 @@ class Tensor {
size_t offset_;
};
#ifdef PADDLE_MOBILE_DEBUG
inline Print &operator<<(Print &printer, const Tensor &tensor) {
printer << " dims: " << tensor.dims() << "\n";
int stride = tensor.numel() / 20;
stride = stride > 0 ? stride : 1;
for (int i = 0; i < tensor.numel(); i += stride) {
printer << tensor.data<float>()[i] << " ";
}
return printer;
}
#endif
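// Usage: with this overload, the debug branch in OperatorBase::Run() can
// stream a tensor directly (DLOG << *out); note the printer assumes float
// data and samples at most ~20 elements.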
inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
Tensor res;
res.ShareDataWith(src);
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io.h"
#include <fstream>
#include <vector>
#include "common/enforce.h"
#include "common/log.h"
#include "framework/framework.pb-c.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/program/program_desc.h"
#include "framework/program/var_desc.h"
#include "framework/scope.h"
#include "framework/tensor.h"
namespace paddle_mobile {
using framework::Variable;
void ReadBinaryFile(const std::string &filename, std::string *contents) {
std::ifstream fin(filename, std::ios::in | std::ios::binary);
PADDLE_MOBILE_ENFORCE(fin.is_open(), "open file: %s failed",
filename.c_str());
fin.seekg(0, std::ios::end);
contents->clear();
contents->resize(fin.tellg());
fin.seekg(0, std::ios::beg);
fin.read(&(contents->at(0)), contents->size());
fin.close();
}
static size_t ReadBuffer(const char *file_name, uint8_t **out) {
printf("%s \n", file_name);
FILE *fp;
fp = fopen(file_name, "rb");
PADDLE_MOBILE_ENFORCE(fp != NULL, "%s open failed!", file_name);
fseek(fp, 0, SEEK_END);
size_t size = ftell(fp);
rewind(fp);
DLOG << "model size: " << size;
*out = (uint8_t *)malloc(size);
size_t cur_len = 0;
size_t nread;
while ((nread = fread(*out + cur_len, 1, size - cur_len, fp)) != 0) {
cur_len += nread;
}
fclose(fp);
return cur_len;
}
template <typename Dtype, Precision P>
void Loader<Dtype, P>::LoadVar(framework::Variable *variable,
const framework::VarDesc &var_desc,
const std::string &file_path) {
auto tensor = variable->GetMutable<framework::LoDTensor>();
std::ifstream is(file_path);
PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
file_path.c_str());
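// note: the seek-to-end-and-back below is effectively a no-op; the end
// position is never read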
std::fpos<mbstate_t> pos;
pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// 2 Lod information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
auto &lod = *tensor->mutable_lod();
lod.resize(lod_level);
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
for (auto j : tmp) {
LOG(kLOG_DEBUG1) << " lod - " << j;
}
lod[i] = tmp;
}
// 3. tensor version
uint32_t tensor_version;
is.read(reinterpret_cast<char *>(&tensor_version), sizeof(tensor_version));
// 4. tensor desc
int32_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char *>(buf.get()), size);
const framework::TensorDesc &desc = var_desc.Tensor_desc();
PaddleMobile__Framework__Proto__VarType__TensorDesc *tensor_desc = NULL;
// void *v;
// PaddleMobile__Framework__Proto__VarType__TensorDesc_Closure()(tensor_desc,
// buf.get());
// DLOG << "PaddleMobile__Framework__Proto__VarType__TensorDesc_Closure- " <<
// tensor_desc;
// framework::TensorDesc &tensor_desc = variable->
// PaddleMobile__Framework__Proto__ProgramDesc *c_program;
// uint8_t *proto_buf = NULL;
// size_t read_size = ReadBuffer(file_path.c_str(), &proto_buf);
// c_program = paddle_mobile__framework__proto__program_desc__unpack(NULL,
// read_size, buf);
// paddle_mobile__framework__proto__var_type__tensor_desc__init()
int memory_size = 1;
for (auto l : desc.Dims()) {
memory_size *= l;
}
tensor->Resize(framework::make_ddim(desc.Dims()));
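// caution: only the FP32 case below re-points `memory` at real tensor
// storage via mutable_data; for other types it still points at the Tensor
// object itself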
void *memory = tensor;
int type_size = 0;
switch (desc.DataType()) {
case framework::VARTYPE_TYPE_FP16:
type_size = 2;
break;
case framework::VARTYPE_TYPE_FP32:
type_size = 4;
memory = tensor->mutable_data<float>();
break;
case framework::VARTYPE_TYPE_FP64:
type_size = 8;
break;
case framework::VARTYPE_TYPE_INT32:
type_size = 4;
break;
case framework::VARTYPE_TYPE_INT64:
type_size = 8;
break;
case framework::VARTYPE_TYPE_BOOL:
type_size = 1;
break;
default:
break;
}
is.read(static_cast<char *>(memory), memory_size * type_size);
is.close();
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname) {
std::string model_filename = dirname + "/__model__";
PaddleMobile__Framework__Proto__ProgramDesc *c_program;
uint8_t *buf = NULL;
size_t read_size = ReadBuffer(model_filename.c_str(), &buf);
PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");
c_program = paddle_mobile__framework__proto__program_desc__unpack(
NULL, read_size, buf);
PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
std::shared_ptr<framework::ProgramDesc> originProgramDesc =
std::make_shared<framework::ProgramDesc>(c_program);
framework::Program<Dtype, P> program;
program.model_path = dirname;
program.originProgram = originProgramDesc;
std::shared_ptr<framework::Scope> scope =
std::make_shared<framework::Scope>();
program.scope = scope;
originProgramDesc->Block(0);
for (const auto &block : originProgramDesc->Blocks()) {
for (int i = 0; i < block->Vars().size(); ++i) {
std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
// DLOG << "var name-- " << var_desc->Name();
auto var = scope->Var(var_desc->Name());
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
if (var_desc->Persistable() &&
var_desc->Type() != framework::VARTYPE_TYPE_FEED_MINIBATCH &&
var_desc->Type() != framework::VARTYPE_TYPE_FETCH_LIST) {
// DLOG << "to load var ";
auto dim = var_desc->Tensor_desc().Dims();
auto tensor = var->GetMutable<framework::LoDTensor>();
tensor->Resize(framework::make_ddim(dim));
} else {
auto dim = var_desc->Tensor_desc().Dims();
PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
dim[0] = 1;
auto tensor = var->GetMutable<framework::LoDTensor>();
tensor->Resize(framework::make_ddim(dim));
}
} else {
// TODO(codeWorm): some.
}
}
}
originProgramDesc->Description("program: ");
paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
return program;
}
template class Loader<CPU, Precision::FP32>;
#pragma mark - executor
template <typename Dtype, Precision P>
Executor<Dtype, P>::Executor(const framework::Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<framework::BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<framework::OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<framework::OpDesc> op = ops[j];
auto op_base = framework::OpRegistry<Dtype>::CreateOp(
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
program_.scope);
op_base->InferShape();
ops_of_block_[*block_desc.get()].push_back(op_base);
}
}
InitMemory();
}
template <typename Dtype, Precision P>
Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size)
: program_(p), batch_size_(batch_size) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
to_predict_program_ = program_.originProgram;
}
Variable *variable_ptr = program_.scope->Var("batch_size");
variable_ptr->SetValue<int>(batch_size);
const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<framework::BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<framework::OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<framework::OpDesc> op = ops[j];
auto op_base = framework::OpRegistry<Dtype>::CreateOp(
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
program_.scope);
op_base->InferShape();
ops_of_block_[*block_desc.get()].push_back(op_base);
}
}
InitMemory();
}
template <typename Dtype, Precision P>
void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor,
const std::string &file_path) {
std::ifstream is(file_path);
PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
file_path.c_str());
std::fpos<mbstate_t> pos;
pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// 2 Lod information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
auto &lod = *tensor->mutable_lod();
lod.resize(lod_level);
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
for (auto j : tmp) {
LOG(kLOG_DEBUG1) << " lod - " << j;
}
lod[i] = tmp;
}
// 3. tensor version
uint32_t tensor_version;
is.read(reinterpret_cast<char *>(&tensor_version), sizeof(tensor_version));
// 4. tensor desc
int32_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char *>(buf.get()), size);
const framework::TensorDesc &desc = var_desc.Tensor_desc();
int memory_size = 1;
for (auto l : desc.Dims()) {
memory_size *= l;
}
tensor->Resize(framework::make_ddim(desc.Dims()));
void *memory = tensor;
int type_size = 0;
switch (desc.DataType()) {
case framework::VARTYPE_TYPE_FP16:
type_size = 2;
break;
case framework::VARTYPE_TYPE_FP32:
type_size = 4;
memory = tensor->mutable_data<float>();
break;
case framework::VARTYPE_TYPE_FP64:
type_size = 8;
break;
case framework::VARTYPE_TYPE_INT32:
type_size = 4;
break;
case framework::VARTYPE_TYPE_INT64:
type_size = 8;
break;
case framework::VARTYPE_TYPE_BOOL:
type_size = 1;
break;
default:
break;
}
is.read(static_cast<char *>(memory), memory_size * type_size);
is.close();
}
template <typename Dtype, Precision P>
void Executor<Dtype, P>::InitMemory() {
for (const auto &block : to_predict_program_->Blocks()) {
for (const auto &var_desc : block->Vars()) {
auto var = program_.scope->Var(var_desc->Name());
if (var_desc->Persistable()) {
auto tensor = var->template GetMutable<framework::LoDTensor>();
if (var_desc->Name() == "feed" || var_desc->Name() == "fetch") {
continue;
}
LoadMemory(*var_desc, tensor,
program_.model_path + "/" + var_desc->Name());
} else {
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
auto tensor = var->template GetMutable<framework::LoDTensor>();
tensor->template mutable_data<Ptype>();
}
}
}
}
}
template <typename Dtype, Precision P>
void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
framework::Tensor *feed_tensor =
g_feed_value->GetMutable<framework::LoDTensor>();
feed_tensor->Resize(t.dims());
feed_tensor->ShareDataWith(t);
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
op->Run();
}
}
template <typename Dtype, Precision P>
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
DLOG << "start predict: ";
framework::LoDTensor tensor;
auto ddim = framework::make_ddim(dims);
auto input_ptr = tensor.mutable_data<Ptype>(ddim);
for (int i = 0; i < input.size(); ++i) {
input_ptr[i] = input[i];
}
predict(tensor, 0);
framework::Variable *g_feed_value = program_.scope->Var("col");
auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
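// fetching the output tensor is not implemented yet; an empty vector is
// returned for now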
return {};
}
template class Executor<CPU, Precision::FP32>;
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory.h>
#include <string>
#include <vector>
#include "common/types.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/paddle_mobile_object.h"
#include "framework/program/program.h"
#include "framework/tensor.h"
namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class Loader : PaddleMobileObject {
public:
const framework::Program<Dtype, P> Load(const std::string &dirname);
private:
void LoadVar(framework::Variable *variable,
const framework::VarDesc &var_desc,
const std::string &file_path);
};
template <typename Dtype, Precision P = Precision::FP32>
class Executor {
public:
typedef typename PrecisionTrait<P>::ptype Ptype;
Executor() = default;
Executor(const framework::Program<Dtype> p);
Executor(const framework::Program<Dtype> p, int batch_size);
std::shared_ptr<framework::Tensor> predict(framework::Tensor &t);
std::vector<Ptype> predict(const std::vector<Ptype> &input,
const std::vector<int64_t> &dims);
protected:
void InitMemory();
void LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, const std::string &file_path);
framework::Program<Dtype> program_;
int batch_size_ = 1;
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
void predict(const framework::Tensor &t, int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
};
} // namespace paddle_mobile
......@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/batchnorm_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
using std::string;
template <typename DeviceType, typename T>
class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
public:
BatchNormOp(const std::string &type, const VariableNameMap &inputs,
BatchNormOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
......@@ -32,7 +33,7 @@ class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::BatchNormKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -36,7 +36,7 @@ class BoxCoderOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::BoxCoderKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -13,25 +13,25 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/concat_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
using std::string;
template <typename DeviceType, typename T>
class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
public:
ConcatOp(const std::string &type, const VariableNameMap &inputs,
ConcatOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::ConcatKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -14,14 +14,13 @@ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
using std::string;
template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public:
......@@ -35,7 +34,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
void Run() const {
void RunImpl() const {
operators::ConvKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables({"Filter", "Input"});
......
......@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "kernel/elementwise_add_kernel.h"
#include "op_param.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
using std::string;
template <typename DeviceType, typename T>
class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
public:
ElementwiseAddOp(const std::string &type, const VariableNameMap &inputs,
ElementwiseAddOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
......@@ -32,7 +33,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::ElementwiseAddKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -14,22 +14,23 @@ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using std::string;
template <typename DeviceType, typename T>
class FeedOp : public framework::OperatorBase<DeviceType> {
public:
FeedOp(const std::string &type, const VariableNameMap &inputs,
FeedOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void RunImpl() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void InferShape() const {
auto out_dims = param_.Out()->dims();
......
......@@ -14,27 +14,24 @@ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using std::string;
template <typename DeviceType, typename T>
class FetchOp : public framework::OperatorBase<DeviceType> {
public:
FetchOp(const std::string &type, const VariableNameMap &inputs,
FetchOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
param_.Out()->ShareDataWith(*param_.InputX());
for (int i = 0; i < param_.Out()->numel(); ++i) {
DLOG << param_.Out()->template data<float>()[i];
}
}
void RunImpl() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void InferShape() const {
auto x_dims = param_.InputX()->dims();
......
......@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
......@@ -22,7 +23,8 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
using std::string;
using std::vector;
class FusionFcMatcher : public framework::FusionOpMatcher {
public:
FusionFcMatcher() {
......@@ -31,7 +33,7 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
}
void FolderNodes(framework::Node &node) {
std::vector<std::shared_ptr<framework::OpDesc>> origin_descs =
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node.OpDescs(node_.Depth());
node.Folder(node_.Depth(), Type(), {{"elementwise_add", {"Y", "Z"}}});
}
......@@ -42,7 +44,7 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
template <typename DeviceType, typename T>
class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
public:
FushionFcOp(const std::string &type, const VariableNameMap &inputs,
FushionFcOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
......@@ -50,7 +52,7 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::FushionFcKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -11,27 +11,27 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/lrn_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
using std::string;
template <typename DeviceType, typename T>
class LrnOp : public framework::OperatorWithKernel<DeviceType> {
public:
LrnOp(const std::string &type, const VariableNameMap &inputs,
LrnOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::LrnKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -11,7 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/mul_kernel.h"
#include "operators/op_param.h"
......@@ -19,8 +21,6 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> {
public:
......@@ -31,7 +31,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::MulKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -36,7 +36,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::MultiClassNMSKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -17,25 +17,26 @@ limitations under the License. */
#include <framework/operator.h>
#include <operators/kernel/pool_kernel.h>
#include <operators/op_param.h>
#include <string>
namespace paddle_mobile {
namespace operators {
using namespace framework;
using framework::AttributeMap;
using framework::OperatorWithKernel;
using framework::Scope;
using std::string;
template <typename DeviceType, typename T>
class PoolOp : public framework::OperatorWithKernel<DeviceType> {
class PoolOp : public OperatorWithKernel<DeviceType> {
public:
PoolOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
PoolOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs, scope),
param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
using OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
void Run() const {
// InferShape();
void RunImpl() const {
operators::PoolKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables({"X"});
......
......@@ -36,7 +36,7 @@ class PriorBoxOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::PriorBoxKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -35,7 +35,7 @@ class ReluOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::ReluKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -35,7 +35,7 @@ class ReshapeOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::ReshapeKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -36,7 +36,7 @@ class SigmoidOp : public framework::OperatorWithKernel<DeviceType> {
void InferShape() const override;
void Run() const {
void RunImpl() const {
operators::SigmoidKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables({"X"});
......
......@@ -36,7 +36,7 @@ class SoftmaxOp : public framework::OperatorWithKernel<DeviceType> {
void InferShape() const override;
void Run() const {
void RunImpl() const {
operators::SoftmaxKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables({"X"});
......
......@@ -36,7 +36,7 @@ class TransposeOp : public framework::OperatorWithKernel<DeviceType> {
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
void RunImpl() const {
operators::TransposeKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include <string>
#include <vector>
#include "./io.h"
#include "common/io.h"
#include "common/log.h"
#include "framework/op_registry.h"
#include "operators/conv_op.h"
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io.h"
#include "common/io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "common/io.h"
#include "framework/program/program-optimize/node.h"
#include "framework/program/program-optimize/program_optimize.h"
#include "io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -13,25 +13,25 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <fstream>
#include "../test_helper.h"
#include "../test_include.h"
#include "io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
// ../../../test/models/googlenet
// ../../../test/models/mobilenet
auto time1 = time();
auto program = loader.Load(std::string("../models/googlenet"));
auto time2 = time();
DLOG << "load cost :" << time_diff(time1, time1) << "ms";
paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1);
std::vector<float> input;
std::vector<int64_t> dims{1, 3, 224, 224};
GetInput<float>(g_test_image_1x3x224x224, &input, dims);
// DLOG << " input: " << input;
auto time3 = time();
executor.predict(input, dims);
auto time4 = time();
DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
return 0;
}
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h"
#include "../test_helper.h"
#include "io.h"
#include "common/io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h"
#include "../test_helper.h"
#include "./io.h"
#include "common/io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../test_helper.h"
#include "./io.h"
#include "common/io.h"
int main() {
paddle_mobile::framework::Tensor input;
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h"
#include "../test_helper.h"
#include "./io.h"
#include "common/io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h"
#include "../test_helper.h"
#include "./io.h"
#include "common/io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
#include <chrono>
#include <fstream>
#include <random>
......@@ -31,6 +32,18 @@ static const std::string g_test_image_1x3x224x224 =
"../images/test_image_1x3x224x224_float";
using paddle_mobile::framework::DDim;
using paddle_mobile::framework::Tensor;
using Time = decltype(std::chrono::high_resolution_clock::now());
inline Time time() { return std::chrono::high_resolution_clock::now(); }
inline double time_diff(Time t1, Time t2) {
typedef std::chrono::microseconds ms;
auto diff = t2 - t1;
ms counter = std::chrono::duration_cast<ms>(diff);
return counter.count() / 1000.0;
}
template <typename T>
void SetupTensor(paddle_mobile::framework::Tensor *input,
paddle_mobile::framework::DDim dims, T lower, T upper) {
......
......@@ -20,6 +20,7 @@ limitations under the License. */
#include "./test_helper.h"
#include "common/enforce.h"
#include "common/io.h"
#include "common/log.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
......@@ -29,4 +30,3 @@ limitations under the License. */
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
#include "io.h"