Commit 91ac601a authored by W wangliu

modify code style

Parent 5766d532
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
-#include <memory.h>
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
@@ -27,21 +27,21 @@ limitations under the License. */
 namespace paddle_mobile {

-template <typename Dtype, Precision P = Precision::FP32>
+template <typename Dtype = CPU, Precision P = Precision::FP32>
 class Loader {
  public:
  const framework::Program<Dtype, P> Load(const std::string &dirname,
                                          bool optimize = false);

  private:
  void LoadVar(framework::Variable *variable,
               const framework::VarDesc &var_desc,
               const std::string &file_path);
 };

-template <typename Dtype, Precision P = Precision::FP32>
+template <typename Dtype = CPU, Precision P = Precision::FP32>
 class Executor {
  public:
  typedef typename PrecisionTrait<P>::ptype Ptype;

  Executor(const framework::Program<Dtype> p, int batch_size = 1,
@@ -52,7 +52,7 @@ class Executor {
  std::vector<Ptype> Predict(const std::vector<Ptype> &input,
                             const std::vector<int64_t> &dims);

  protected:
  Executor() = default;
  void InitMemory();
@@ -64,9 +64,9 @@ class Executor {
  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
                                             int block_id);

  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
      ops_of_block_;
  bool use_optimize_ = false;
 };

 }  // namespace paddle_mobile
\ No newline at end of file
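
For context, here is a minimal, hypothetical usage sketch of the Loader/Executor interface declared in this header. It assumes paddle_mobile::CPU as the device tag, that PrecisionTrait<Precision::FP32>::ptype is float, that the header is reachable as "io.h", and that the Executor constructor parameters cut off at the hunk boundary have default values; the model path and input shape are invented for illustration.

```cpp
#include <cstdint>
#include <vector>

#include "io.h"  // assumed include path for the header shown in this diff

int main() {
  // With the new default template argument, Loader<> would resolve to
  // Loader<CPU, Precision::FP32>; the device tag is spelled out here.
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load("../models/mobilenet", /*optimize=*/true);  // path is illustrative

  // Batch size 1; any further constructor parameters (truncated in the
  // hunk above) are assumed to have defaults.
  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1);

  // A zero-filled 1x3x224x224 FP32 input, purely for illustration.
  std::vector<float> input(1 * 3 * 224 * 224, 0.0f);
  std::vector<int64_t> dims = {1, 3, 224, 224};
  std::vector<float> output = executor.Predict(input, dims);
  return 0;
}
```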