Commit d8cea7f7 authored by hjchen2

revert batch size and protobuf-c

Parent 69a0ecad
@@ -33,11 +33,14 @@ namespace paddle_mobile {
 using framework::Variable;
 template <typename Dtype, Precision P>
-Executor<Dtype, P>::Executor(const framework::Program<Dtype> p,
+Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
                              const bool use_optimize, const bool loddable)
-    : program_(p), use_optimize_(use_optimize), loddable_(loddable) {
+    : program_(p),
+      batch_size_(batch_size),
+      use_optimize_(use_optimize),
+      loddable_(loddable) {
   Variable *variable_ptr = program_.scope->Var("batch_size");
-  variable_ptr->SetValue<int>(1);
+  variable_ptr->SetValue<int>(batch_size);
   to_predict_program_ =
       use_optimize_ ? program_.optimizeProgram : program_.originProgram;
   PADDLE_MOBILE_ENFORCE(to_predict_program_ != nullptr,
...
@@ -35,7 +35,7 @@ class Executor {
   // @param program program converted from proto program in PaddlePaddle
   // @param use_optimize bool whether use operator fusion to speed up or not
   // @param loddable bool
-  Executor(const framework::Program<Dtype> program,
+  Executor(const framework::Program<Dtype> program, int batch_size = 1,
            const bool use_optimize = true, const bool loddable = false);
   // predict with tensor input
@@ -81,6 +81,7 @@ class Executor {
                  framework::LoDTensor *tensor);
   framework::Program<Dtype> program_;
+  int batch_size_ = 1;
   std::shared_ptr<framework::ProgramDesc> to_predict_program_;
   std::map<framework::BlockDesc,
            std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
...
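For orientation, here is a minimal sketch of constructing the executor with the restored `batch_size` parameter. The template arguments and the `Loader::Load()` call shape are inferred from the hunks in this commit, not verbatim project code:

```cpp
// Sketch under assumptions: Loader<Dtype, P>::Load() returns the
// framework::Program<Dtype> that the Executor constructor expects,
// as the paddle_mobile.cpp hunks below suggest.
paddle_mobile::Loader<paddle_mobile::CPU, paddle_mobile::Precision::FP32> loader;
auto program = loader.Load("path/to/model_dir", /*optimize=*/true,
                           /*quantification=*/false);
// batch_size is stored in batch_size_ and also published to the scope
// under the name "batch_size" (see the executor.cpp hunk above).
paddle_mobile::Executor<paddle_mobile::CPU, paddle_mobile::Precision::FP32>
    executor(program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
```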
@@ -25,7 +25,8 @@ void PaddleMobile<Dtype, P>::SetThreadNum(int num) {
 template <typename Dtype, Precision P>
 bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
-                                  bool quantification, bool loddable) {
+                                  bool quantification, int batch_size,
+                                  bool loddable) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -34,7 +35,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
-        loader_->Load(dirname, optimize, quantification), optimize, loddable);
+        loader_->Load(dirname, optimize, quantification), batch_size, optimize,
+        loddable);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
@@ -45,7 +47,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
 template <typename Dtype, Precision P>
 bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
                                   const std::string &para_path, bool optimize,
-                                  bool quantification, bool loddable) {
+                                  bool quantification, int batch_size,
+                                  bool loddable) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -55,7 +58,7 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
         loader_->Load(model_path, para_path, optimize, quantification),
-        optimize, loddable);
+        batch_size, optimize, loddable);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
@@ -67,6 +70,7 @@ template <typename Dtype, Precision P>
 bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
     size_t model_len, const uint8_t *model_buf, size_t combined_params_len,
     const uint8_t *combined_params_buf) {
+  int batch_size = 1;
   bool optimise = true;
   bool quantification = false;
@@ -81,7 +85,7 @@ bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
         loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len,
                                     combined_params_buf, optimise,
                                     quantification),
-        optimise);
+        batch_size, optimise);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
...
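A hedged sketch of calling the memory-based loader changed above; the header path and the idea that the caller has already read the two blobs into vectors are assumptions for illustration:

```cpp
#include <cstdint>
#include <vector>
#include "io/paddle_mobile.h"  // assumed header path; adjust to the project layout

// Sketch only: the caller supplies the combined model and parameter blobs.
// After this commit, LoadCombinedMemory pins batch_size = 1 internally.
bool LoadFromMemory(const std::vector<uint8_t> &model,
                    const std::vector<uint8_t> &params) {
  paddle_mobile::PaddleMobile<paddle_mobile::CPU, paddle_mobile::Precision::FP32>
      mobile;
  return mobile.LoadCombinedMemory(model.size(), model.data(),
                                   params.size(), params.data());
}
```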
@@ -36,11 +36,12 @@ class PaddleMobile {
  public:
   PaddleMobile() {}
   bool Load(const std::string &dirname, bool optimize = false,
-            bool quantification = false, bool loddable = false);
+            bool quantification = false, int batch_size = 1,
+            bool loddable = false);
   bool Load(const std::string &model_path, const std::string &para_path,
             bool optimize = false, bool quantification = false,
-            bool loddable = false);
+            int batch_size = 1, bool loddable = false);
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
...
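The test change near the end of this commit shows the new call shape; here is a hedged usage sketch of the directory-based overload, with an illustrative path and the template arguments the tests appear to use:

```cpp
#include "io/paddle_mobile.h"  // assumed header path

int main() {
  paddle_mobile::PaddleMobile<paddle_mobile::CPU, paddle_mobile::Precision::FP32>
      paddle_mobile;
  // Parameter order after this commit:
  //   dirname, optimize, quantification, batch_size, loddable
  bool isok = paddle_mobile.Load("path/to/nlp_model_dir", /*optimize=*/true,
                                 /*quantification=*/false, /*batch_size=*/1,
                                 /*loddable=*/true);
  return isok ? 0 : 1;
}
```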
This diff is collapsed.
 /*
- * Copyright (c) 2008-2018, Dave Benson and the protobuf-c authors.
+ * Copyright (c) 2008-2017, Dave Benson and the protobuf-c authors.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -784,13 +784,13 @@ uint32_t protobuf_c_version_number(void);
  * The version of the protobuf-c headers, represented as a string using the same
  * format as protobuf_c_version().
  */
-#define PROTOBUF_C_VERSION "1.3.1"
+#define PROTOBUF_C_VERSION "1.3.0"
 
 /**
  * The version of the protobuf-c headers, represented as an integer using the
  * same format as protobuf_c_version_number().
  */
-#define PROTOBUF_C_VERSION_NUMBER 1003001
+#define PROTOBUF_C_VERSION_NUMBER 1003000
 
 /**
  * The minimum protoc-c version which works with the current version of the
@@ -798,76 +798,6 @@ uint32_t protobuf_c_version_number(void);
  */
 #define PROTOBUF_C_MIN_COMPILER_VERSION 1000000
 
-/**
- * Look up a `ProtobufCEnumValue` from a `ProtobufCEnumDescriptor` by name.
- *
- * \param desc
- *     The `ProtobufCEnumDescriptor` object.
- * \param name
- *     The `name` field from the corresponding `ProtobufCEnumValue` object to
- *     match.
- * \return
- *     A `ProtobufCEnumValue` object.
- * \retval NULL
- *     If not found or if the optimize_for = CODE_SIZE option was set.
- */
-PROTOBUF_C__API
-const ProtobufCEnumValue *protobuf_c_enum_descriptor_get_value_by_name(
-    const ProtobufCEnumDescriptor *desc, const char *name);
-
-/**
- * Look up a `ProtobufCEnumValue` from a `ProtobufCEnumDescriptor` by numeric
- * value.
- *
- * \param desc
- *     The `ProtobufCEnumDescriptor` object.
- * \param value
- *     The `value` field from the corresponding `ProtobufCEnumValue` object to
- *     match.
- *
- * \return
- *     A `ProtobufCEnumValue` object.
- * \retval NULL
- *     If not found.
- */
-PROTOBUF_C__API
-const ProtobufCEnumValue *protobuf_c_enum_descriptor_get_value(
-    const ProtobufCEnumDescriptor *desc, int value);
-
-/**
- * Look up a `ProtobufCFieldDescriptor` from a `ProtobufCMessageDescriptor` by
- * the name of the field.
- *
- * \param desc
- *     The `ProtobufCMessageDescriptor` object.
- * \param name
- *     The name of the field.
- * \return
- *     A `ProtobufCFieldDescriptor` object.
- * \retval NULL
- *     If not found or if the optimize_for = CODE_SIZE option was set.
- */
-PROTOBUF_C__API
-const ProtobufCFieldDescriptor *protobuf_c_message_descriptor_get_field_by_name(
-    const ProtobufCMessageDescriptor *desc, const char *name);
-
-/**
- * Look up a `ProtobufCFieldDescriptor` from a `ProtobufCMessageDescriptor` by
- * the tag value of the field.
- *
- * \param desc
- *     The `ProtobufCMessageDescriptor` object.
- * \param value
- *     The tag value of the field.
- * \return
- *     A `ProtobufCFieldDescriptor` object.
- * \retval NULL
- *     If not found.
- */
-PROTOBUF_C__API
-const ProtobufCFieldDescriptor *protobuf_c_message_descriptor_get_field(
-    const ProtobufCMessageDescriptor *desc, unsigned value);
-
 /**
  * Determine the number of bytes required to store the serialised message.
  *
@@ -879,42 +809,6 @@ const ProtobufCFieldDescriptor *protobuf_c_message_descriptor_get_field(
 PROTOBUF_C__API
 size_t protobuf_c_message_get_packed_size(const ProtobufCMessage *message);
 
-/**
- * Serialise a message from its in-memory representation.
- *
- * This function stores the serialised bytes of the message in a pre-allocated
- * buffer.
- *
- * \param message
- *     The message object to serialise.
- * \param[out] out
- *     Buffer to store the bytes of the serialised message. This buffer must
- *     have enough space to store the packed message. Use
- *     protobuf_c_message_get_packed_size() to determine the number of bytes
- *     required.
- * \return
- *     Number of bytes stored in `out`.
- */
-PROTOBUF_C__API
-size_t protobuf_c_message_pack(const ProtobufCMessage *message, uint8_t *out);
-
-/**
- * Serialise a message from its in-memory representation to a virtual buffer.
- *
- * This function calls the `append` method of a `ProtobufCBuffer` object to
- * consume the bytes generated by the serialiser.
- *
- * \param message
- *     The message object to serialise.
- * \param buffer
- *     The virtual buffer object.
- * \return
- *     Number of bytes passed to the virtual buffer.
- */
-PROTOBUF_C__API
-size_t protobuf_c_message_pack_to_buffer(const ProtobufCMessage *message,
-                                         ProtobufCBuffer *buffer);
-
 /**
  * Unpack a serialised message into an in-memory representation.
  *
@@ -983,33 +877,6 @@ PROTOBUF_C__API
 void protobuf_c_message_init(const ProtobufCMessageDescriptor *descriptor,
                              void *message);
 
-/**
- * Free a service.
- *
- * \param service
- *     The service object to free.
- */
-PROTOBUF_C__API
-void protobuf_c_service_destroy(ProtobufCService *service);
-
-/**
- * Look up a `ProtobufCMethodDescriptor` by name.
- *
- * \param desc
- *     Service descriptor.
- * \param name
- *     Name of the method.
- *
- * \return
- *     A `ProtobufCMethodDescriptor` object.
- * \retval NULL
- *     If not found or if the optimize_for = CODE_SIZE option was set.
- */
-PROTOBUF_C__API
-const ProtobufCMethodDescriptor *
-protobuf_c_service_descriptor_get_method_by_name(
-    const ProtobufCServiceDescriptor *desc, const char *name);
-
 /**
  * Initialise a `ProtobufCBufferSimple` object.
  */
@@ -1047,18 +914,6 @@ PROTOBUF_C__API
 void protobuf_c_buffer_simple_append(ProtobufCBuffer *buffer, size_t len,
                                      const unsigned char *data);
 
-PROTOBUF_C__API
-void protobuf_c_service_generated_init(
-    ProtobufCService *service, const ProtobufCServiceDescriptor *descriptor,
-    ProtobufCServiceDestroy destroy);
-
-PROTOBUF_C__API
-void protobuf_c_service_invoke_internal(ProtobufCService *service,
-                                        unsigned method_index,
-                                        const ProtobufCMessage *input,
-                                        ProtobufCClosure closure,
-                                        void *closure_data);
-
 /**@}*/
 
 PROTOBUF_C__END_DECLS
...
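Two reading aids for the vendored-header diff above. First, the version revert from 1.3.1/1003001 to 1.3.0/1003000 is self-consistent: protobuf-c encodes the integer version as major*1000000 + minor*1000 + patch. Second, the removed doc comments describe the canonical two-step pack workflow; a minimal sketch of that workflow as it works against upstream protobuf-c headers (it would not compile against this trimmed header, since the pack declarations are dropped), with `msg` standing in for any protoc-c generated message pointer:

```cpp
#include <cassert>
#include <cstdlib>
#include "protobuf-c.h"  // upstream header, where these declarations exist

// Sketch only: every protoc-c generated struct begins with a ProtobufCMessage
// base, so any initialised generated message can be passed here.
size_t SerializeToHeapBuffer(const ProtobufCMessage *msg, uint8_t **out) {
  size_t len = protobuf_c_message_get_packed_size(msg);  // size the buffer first
  *out = static_cast<uint8_t *>(std::malloc(len));
  if (*out == nullptr) return 0;
  size_t written = protobuf_c_message_pack(msg, *out);   // then fill it
  assert(written == len);  // pack() returns the number of bytes stored
  return written;
}
```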
@@ -23,7 +23,7 @@ int main() {
   // auto isok = paddle_mobile.Load(std::string(g_mobilenet_detect) + "/model",
   //                                std::string(g_mobilenet_detect) + "/params", true);
-  auto isok = paddle_mobile.Load(g_nlp, true, false, true);
+  auto isok = paddle_mobile.Load(g_nlp, true, false, 1, true);
   // auto isok = paddle_mobile.Load(std::string(g_nlp) + "/model",
   //                                std::string(g_nlp) + "/params", false);
...
@@ -4,7 +4,8 @@ TOTAL_ERRORS=0
 # The trick to remove deleted files: https://stackoverflow.com/a/2413151
 for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}' | \
-  grep -v ".pb.cpp" | grep -v ".pb.h" | grep -v ".pb-c.h" | grep -v ".pb-c.c"); do
+  grep -v ".pb.cpp" | grep -v ".pb.h" | grep -v ".pb-c.h" | grep -v ".pb-c.c" | \
+  grep -v "protobuf-c.h" | grep -v "protobuf-c.c"); do
   cpplint $file;
   TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?);
 done
...