Commit ddf6b722 authored by 朔-望

modify to 2 spaces indent & format code & rm build folder

Parent e35ef6fe
@@ -2,5 +2,4 @@
Language: Cpp
BasedOnStyle: LLVM
Standard: Cpp11
-IndentWidth: 4
...
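With IndentWidth: 4 removed, the BasedOnStyle: LLVM default of 2-space indentation takes effect, which is what the commit message describes. As a sketch (the file path below is a placeholder, not from this commit), the tree can then be reformatted against this config with the stock clang-format CLI:

  clang-format -i -style=file path/to/source.cpp

-style=file makes clang-format pick up the nearest .clang-format, i.e. the one edited above.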
This diff is collapsed.
@@ -56,7 +56,7 @@ struct Print {
    return *this;
  }

private:
  void print(LogLevel level) {
    buffer_ << std::endl;
    if (level == kLOG_ERROR) {
@@ -73,8 +73,7 @@ struct ToLog {
      : level_(level) {
    unsigned blanks =
        (unsigned)(level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
-    printer_ << logs[level] << " " << info << ":"
-             << std::string(blanks, ' ');
+    printer_ << logs[level] << " " << info << ":" << std::string(blanks, ' ');
  }
  template <typename T> ToLog &operator<<(T const &value) {
@@ -84,7 +83,7 @@ struct ToLog {
  ~ToLog() { printer_.print(level_); }

private:
  LogLevel level_;
  Print printer_;
};
@@ -93,10 +92,10 @@ struct ToLog {
  if (level > paddle_mobile::log_level) { \
  } else \
    paddle_mobile::ToLog( \
-       level, (std::stringstream() \
+       level, \
+       (std::stringstream() \
        << "[file: " \
-       << (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) \
-                                  : __FILE__) \
+       << (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) : __FILE__) \
        << "] [line: " << __LINE__ << "] ") \
        .str())
@@ -107,8 +106,7 @@ struct ToLog {
    paddle_mobile::kLOG_DEBUG, \
    (std::stringstream() \
     << "[file: " \
-    << (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) \
-                               : __FILE__) \
+    << (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) : __FILE__) \
     << "] [line: " << __LINE__ << "] ") \
    .str())
} // namespace paddle_mobile
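For orientation, a minimal usage sketch of the macros defined above (the call sites are illustrative, not from this commit; ToLog flushes its buffer via printer_.print() when it is destroyed at the end of the full statement):

  LOG(paddle_mobile::kLOG_ERROR) << "dims mismatch: " << 3 << " vs " << 4;
  DLOG << "conv output shape inferred";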
@@ -144,7 +142,7 @@ struct Print {
  friend struct ToLog;
  template <typename T> Print &operator<<(T const &value) {}

private:
};

struct ToLog {
...
@@ -49,7 +49,7 @@ template <typename F> struct VariantHelper<F> {
};

template <size_t size> class RawData {
public:
  char data[size];
  RawData() {}
  RawData(const RawData &raw_data) { strcpy(data, raw_data.data); }
@@ -87,7 +87,7 @@ template <typename... Ts> struct Variant {
  size_t TypeId() const { return type_id; }

private:
  static inline size_t invalid_type() { return typeid(void).hash_code(); }
  typedef VariantHelper<Ts...> helper;
  size_t type_id;
...
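A minimal sketch of this Variant in use. Only Get and TypeId appear in this hunk; the Set<int> call mirrors how Attribute (next file) stores values and is an assumption about the full header:

  Variant<int, float> v;
  v.Set<int>(42);                              // assumed setter, not shown in this hunk
  if (v.TypeId() == typeid(int).hash_code()) { // the type tag is a hash_code
    int x = v.Get<int>();                      // x == 42
  }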
@@ -27,7 +27,7 @@ namespace paddle_mobile {
class BlockDesc;

class Attribute {
public:
  static Attribute GetAttrValue(const proto::OpDesc::Attr &attr_desc) {
    // std::cout << "begin get attr value" << std::endl;
    Attribute attr;
@@ -100,7 +100,7 @@ class Attribute {
  template <typename T> T &Get() const { return variant_.Get<T>(); }

private:
  Variant<int, float, std::string, std::vector<int>, std::vector<float>,
          std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
          int64_t>
@@ -110,7 +110,7 @@ class Attribute {
using AttributeMap = std::unordered_map<std::string, Attribute>;

class AttrReader {
public:
  explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}

  template <typename T> inline T Get(const std::string &name) const {
@@ -121,7 +121,7 @@ class AttrReader {
    return ((Attribute)attrs_.at(name)).Get<T>();
  }

private:
  const AttributeMap &attrs_;
};
...
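A short, hypothetical sketch of AttrReader in use ("axis" and attr_desc are illustrative; GetAttrValue is the factory shown above):

  AttributeMap attrs;                                  // usually built from a proto::OpDesc
  attrs["axis"] = Attribute::GetAttrValue(attr_desc);  // attr_desc: some proto::OpDesc::Attr
  AttrReader reader(attrs);
  int axis = reader.Get<int>("axis");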
@@ -27,7 +27,7 @@ namespace paddle_mobile {
namespace framework {

class BlockDesc : PaddleMobileObject {
public:
  BlockDesc(const proto::BlockDesc &desc);
  const int &ID() const { return desc_.idx(); }
@@ -35,8 +35,7 @@ class BlockDesc : PaddleMobileObject {
  const int &Parent() const { return desc_.parent_idx(); }

  bool operator==(const paddle_mobile::framework::BlockDesc &in_block) const {
-    return this->ID() == in_block.ID() &&
-           this->Parent() == in_block.Parent();
+    return this->ID() == in_block.ID() && this->Parent() == in_block.Parent();
  }

  bool operator<(const paddle_mobile::framework::BlockDesc &in_block) const {
@@ -46,7 +45,7 @@ class BlockDesc : PaddleMobileObject {
  std::vector<std::shared_ptr<VarDesc>> Vars() const;
  std::vector<std::shared_ptr<OpDesc>> Ops() const;

private:
  proto::BlockDesc desc_;
  std::vector<std::shared_ptr<OpDesc>> ops_;
  std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
...
@@ -90,26 +90,24 @@ DDim make_ddim(const std::vector<int> &dims) {
// XXX For some reason, putting this in an anonymous namespace causes
// errors
struct DynamicMutableIndexer : Vistor<int64_t &> {
public:
  explicit DynamicMutableIndexer(int idx) : idx_(idx) {}

-  template <int D> int64_t &operator()(Dim<D> &dim) const {
-    return dim[idx_];
-  }
+  template <int D> int64_t &operator()(Dim<D> &dim) const { return dim[idx_]; }

private:
  int idx_;
};

struct DynamicConstIndexer : public Vistor<int64_t> {
public:
  explicit DynamicConstIndexer(int idx) : idx_(idx) {}

  template <int D> int64_t operator()(const Dim<D> &dim) const {
    return dim[idx_];
  }

private:
  int idx_;
};
@@ -288,7 +286,7 @@ struct OSVistor : Vistor<std::ostream &> {
    return os_ << dim;
  }

private:
  std::ostream &os_;
};
...
@@ -123,9 +123,7 @@ template <> struct DimGetter<0> {
    return d.head;
  }
  // Return a reference if Dim is mutable
-  template <typename D> HOSTDEVICE static int64_t &impl(D &d) {
-    return d.head;
-  }
+  template <typename D> HOSTDEVICE static int64_t &impl(D &d) { return d.head; }
};

template <int D> HOSTDEVICE int64_t &indexer(Dim<D> &dim, int idx) {
...
@@ -35,14 +35,14 @@ namespace paddle_mobile {
namespace framework {

template <typename Dtype> class Executor {
public:
  Executor();
  Executor(const Program<Dtype> p);
  std::shared_ptr<Tensor> predict(Tensor &t);

public:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
...
This diff is collapsed.
This diff is collapsed.
@@ -152,8 +152,7 @@ bool CheckLoD(const LoD &in, int tensor_height) {
    // check: all the offsets in a level should be ascending (no same items
    // allowed).
-    if (!std::is_sorted(level.begin(), level.end(),
-                        [](size_t a, size_t b) {
+    if (!std::is_sorted(level.begin(), level.end(), [](size_t a, size_t b) {
          if (a < b)
            return true;
          return false;
@@ -188,8 +187,7 @@ bool CheckAbsLoD(const LoD &in, int tensor_height) {
    // check: all the offsets in a level should be ascending (no same items
    // allowed).
-    if (!std::is_sorted(level.begin(), level.end(),
-                        [](size_t a, size_t b) {
+    if (!std::is_sorted(level.begin(), level.end(), [](size_t a, size_t b) {
          if (a < b)
            return true;
          return false;
...
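For context, a hypothetical well-formed input for CheckLoD, assuming the usual offset-based LoD representation (a vector of ascending offset levels; the values below are illustrative):

  LoD lod = {{0, 2, 10}, {0, 2, 5, 7, 10}};  // every level ascending
  bool ok = CheckLoD(lod, 10);               // tensor_height matches the last offset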
@@ -102,7 +102,7 @@ bool CheckAbsLoD(const LoD &in, int tensor_height = -1);
 * see https://en.wikipedia.org/wiki/Level_of_details for reference.
 */
class LoDTensor : public Tensor {
public:
  LoDTensor() : Tensor() {}
  explicit LoDTensor(const LoD &lod) : lod_(lod) {}
@@ -139,7 +139,7 @@ class LoDTensor : public Tensor {
    return (lod_)[level].size() - 1;
  }

private:
  LoD lod_;
};
...
@@ -26,7 +26,7 @@ namespace paddle_mobile {
namespace framework {

class OpDesc : PaddleMobileObject {
public:
  OpDesc(const proto::OpDesc &desc);
  const std::vector<std::string> &Input(const std::string &name) const;
  const std::vector<std::string> &Output(const std::string &name) const;
@@ -40,7 +40,7 @@ class OpDesc : PaddleMobileObject {
  const std::string &Type() { return desc_.type(); };

private:
  proto::OpDesc desc_;
  VariableNameMap inputs_;
  VariableNameMap outputs_;
...
@@ -39,7 +39,7 @@ template <typename Dtype> class OpInfoMap;
template <typename Dtype> static OpInfoMap<Dtype> *g_op_info_map = nullptr;

template <typename Dtype> class OpInfoMap {
public:
  static OpInfoMap &Instance() {
    if (g_op_info_map<Dtype> == nullptr) {
      g_op_info_map<Dtype> = new OpInfoMap();
@@ -83,7 +83,7 @@ template <typename Dtype> class OpInfoMap {
    return &map_;
  }

private:
  OpInfoMap() = default;
  std::unordered_map<std::string, OpInfo<Dtype>> map_;
...
@@ -27,8 +27,7 @@ struct OpKernelType {
  struct Hash {
    size_t operator()(const OpKernelType &key) const {
      int data_type = static_cast<int>(key.data_type_) << LEFT_SHIFT;
-      int data_layout = static_cast<int>(key.data_layout_)
-                        << (LEFT_SHIFT * 2);
+      int data_layout = static_cast<int>(key.data_layout_) << (LEFT_SHIFT * 2);
      std::hash<int> hasher;
      return hasher(data_type + data_layout);
...
@@ -49,7 +49,7 @@ static std::unordered_map<
    {"fetch", {{"X"}, {"Out"}}}};

template <typename Dtype> class OperatorBase : PaddleMobileObject {
public:
  OperatorBase(const std::string &type, const VariableNameMap &inputs,
               const VariableNameMap &outputs, const AttributeMap &attrs,
               std::shared_ptr<Scope> scope);
@@ -66,30 +66,30 @@ template <typename Dtype> class OperatorBase : PaddleMobileObject {
    }
  }

protected:
  std::shared_ptr<Scope> scope_;
  std::string type_;
  VariableNameMap inputs_;
  VariableNameMap outputs_;
  AttributeMap attrs_;

private:
  void CheckAllInputOutputSet() const;
};

template <typename Dtype>
class OperatorWithKernel : public OperatorBase<Dtype> {
public:
  OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
-                     const VariableNameMap &outputs,
-                     const AttributeMap &attrs, std::shared_ptr<Scope> scope)
+                     const VariableNameMap &outputs, const AttributeMap &attrs,
+                     std::shared_ptr<Scope> scope)
      : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
  virtual void InferShape() const = 0;
  virtual void Run() const = 0;
};

template <typename Dtype, typename P> class OpKernelBase : PaddleMobileObject {
public:
  virtual void Compute(const P &para) const = 0;
  virtual ~OpKernelBase() = default;
...
@@ -24,13 +24,13 @@ SOFTWARE.
namespace paddle_mobile {

class PaddleMobileObject {
public:
  virtual std::string ToString() {
    char address[128] = {0};
    sprintf(address, "%p", this);
    return std::string(address);
  }

private:
};
} // namespace paddle_mobile
@@ -29,7 +29,7 @@ namespace paddle_mobile {
namespace framework {

class Node : PaddleMobileObject {
public:
  Node(const std::string &type) : type_(type) {}
  Node(std::shared_ptr<OpDesc> op_desc)
      : op_desc_(op_desc), type_(op_desc->Type()){};
@@ -39,7 +39,7 @@ class Node : PaddleMobileObject {
  Node &To(int index);
  uint depth(uint begin = 0);

private:
  std::shared_ptr<OpDesc> op_desc_;
  std::string ToString(std::string blank, const Node *node) const;
  std::vector<std::shared_ptr<Node>> outputs_;
...
@@ -35,8 +35,7 @@ ProgramOptimize::FushionOptimize(std::shared_ptr<ProgramDesc> ori_des) {
        auto op = block->Ops()[j];
        auto op_type = op->Type();
        // DLOG << "op type: " << op_type << " index: " << j;
-        if (op_input_output_key.find(op->Type()) ==
-            op_input_output_key.end()) {
+        if (op_input_output_key.find(op->Type()) == op_input_output_key.end()) {
          return NULL;
        }
...
@@ -26,13 +26,13 @@ namespace paddle_mobile {
namespace framework {

class ProgramOptimize {
public:
  ProgramOptimize() {}
  std::shared_ptr<ProgramDesc> Optimize();
  std::shared_ptr<ProgramDesc>
  FushionOptimize(std::shared_ptr<ProgramDesc> ori_des);

private:
  // std::shared_ptr<ProgramDesc> ori_desc_;
  std::vector<std::unordered_map<std::string, std::shared_ptr<Node>>>
      outputs_nodes_;
...
@@ -28,12 +28,12 @@ namespace framework {
template <typename Dtype, Precision P = Precision::FP32>
class Program : PaddleMobileObject {
public:
  std::shared_ptr<ProgramDesc> originProgram;
  std::shared_ptr<ProgramDesc> optimizeProgram;
  std::shared_ptr<Scope> scope;

private:
};

} // namespace framework
...
@@ -28,12 +28,12 @@ namespace paddle_mobile {
namespace framework {

class ProgramDesc : PaddleMobileObject {
public:
  ProgramDesc(const proto::ProgramDesc &desc);
  std::shared_ptr<BlockDesc> Block(size_t idx);
  const std::vector<std::shared_ptr<BlockDesc>> &Blocks() { return blocks_; };

private:
  std::vector<std::shared_ptr<BlockDesc>> blocks_;
  proto::ProgramDesc desc_;
};
...
@@ -26,7 +26,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
class Scope {
public:
  Scope() {}
  ~Scope() {}
@@ -67,7 +67,7 @@ class Scope {
  Variable *FindVarLocally(const std::string &name) const;

private:
  // Call Scope::NewScope for a sub-scope.
  explicit Scope(Scope const *parent) : parent_(parent) {}
...
@@ -27,7 +27,7 @@ namespace paddle_mobile {
namespace framework {

class SelectedRows {
public:
  SelectedRows(const std::vector<int64_t> &rows, const int64_t &height)
      : rows_(rows), height_(height) {
    value_.reset(new Tensor());
@@ -67,7 +67,7 @@ class SelectedRows {
    return make_ddim(dims);
  }

private:
  // Notice: rows can contain duplicates. We can have {0, 4, 7, 0, 5, 7, 9}
  // here.
  // SelectedRows are simply concatenated when added together. Until a
...
@@ -56,8 +56,7 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> {
};

static inline size_t SizeOfType(std::type_index type) {
-  SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t>
-      functor;
+  SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor;
  size_t size = functor(type);
  // PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s",
  // type.name());
@@ -67,7 +66,7 @@ static inline size_t SizeOfType(std::type_index type) {
class LoDTensor;

class Tensor {
public:
  Tensor() : offset_(0) {}

  /*! Return a pointer to mutable memory block. */
@@ -78,8 +77,8 @@ class Tensor {
    // typeid(T).hash_code(),
    // "Tensor holds the wrong type, it holds %s",
    // this->holder_->type().name());
-    return reinterpret_cast<T *>(
-        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
+    return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
+                                 offset_);
  }

  /*! Return a pointer to constant memory block. */
@@ -236,7 +235,7 @@ class Tensor {
  inline void set_layout(const DataLayout layout) { layout_ = layout; }

private:
  /**
   * @note Placeholder hides type T, so it doesn't appear as a
   * template
...
@@ -189,8 +189,7 @@ void TensorFromStream(std::istream &is, framework::Tensor *tensor) {
  { // read tensor
    std::vector<int64_t> dims;
    dims.reserve(static_cast<size_t>(desc.dims().size()));
-    std::copy(desc.dims().begin(), desc.dims().end(),
-              std::back_inserter(dims));
+    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
    tensor->Resize(framework::make_ddim(dims));
    void *buf;
...
@@ -25,7 +25,7 @@ namespace paddle_mobile {
namespace framework {

class VarDesc {
public:
  VarDesc(const proto::VarDesc &desc);
  std::string Name() const { return desc_.name(); }
@@ -80,7 +80,7 @@ class VarDesc {
    return this->RepeatedToVector(tensor_desc().dims());
  }

private:
  proto::VarDesc desc_;
};
...
@@ -28,7 +28,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
class Variable : public PaddleMobileObject {
public:
  template <typename T> const T *Get() const {
    return static_cast<const T *>(holder_->Ptr());
  }
@@ -67,7 +67,7 @@ class Variable : public PaddleMobileObject {
  void SetName(const std::string *name) { name_ = name; }

private:
  struct Placeholder {
    Placeholder() = default;
    virtual ~Placeholder() = default;
...
@@ -174,10 +174,8 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
      auto var = scope->Var(var_desc->Name());
      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
        if (var_desc->Persistable() &&
-            var_desc->GetType() !=
-                framework::proto::VarType::FEED_MINIBATCH &&
-            var_desc->GetType() !=
-                framework::proto::VarType::FETCH_LIST) {
+            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
+            var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
          framework::LoDTensor *tensor =
              var->GetMutable<framework::LoDTensor>();
          // to load
@@ -268,8 +266,7 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
      }

      if (var.persistable() &&
-          var.type().type() !=
-              framework::proto::VarType::FEED_MINIBATCH &&
+          var.type().type() != framework::proto::VarType::FEED_MINIBATCH &&
          var.type().type() != framework::proto::VarType::FETCH_LIST) {
        // std::cout << " to load " << var.name() <<
        // std::endl;
@@ -289,8 +286,7 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
      // 2 Lod information
      uint64_t lod_level;
-      is.read(reinterpret_cast<char *>(&lod_level),
-              sizeof(lod_level));
+      is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
      // std::cout << " load level: " << lod_level <<
      // std::endl;
      // std::cout << " lod info: " << std::endl;
...
@@ -29,10 +29,10 @@ namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class Loader : PaddleMobileObject {
public:
  const framework::Program<Dtype, P> Load(const std::string &dirname);

private:
  void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
};
...
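Putting Loader and the Executor from above together, a hypothetical end-to-end call sequence (the CPU device tag and the model directory are assumptions, not taken from this diff):

  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load("./model_dir");  // reads the ProgramDesc and persistable vars
  paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(program);
  paddle_mobile::framework::Tensor input;
  // ... resize and fill input to the model's expected shape ...
  std::shared_ptr<paddle_mobile::framework::Tensor> output = executor.predict(input);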
@@ -40,7 +40,7 @@ void Free(void *ptr);
template <typename T> class PODDeleter {
  static_assert(std::is_pod<T>::value, "T must be POD");

public:
  explicit PODDeleter(){};
  void operator()(T *ptr) { Free(static_cast<void *>(ptr)); }
@@ -55,7 +55,7 @@ template <typename T> class PODDeleter {
 * reinterpret_cast
 */
template <typename T> class PlainDeleter {
public:
  explicit PlainDeleter(){};
  void operator()(T *ptr) { Free(reinterpret_cast<void *>(ptr)); }
...
@@ -27,13 +27,13 @@ using namespace framework;
template <typename DeviceType, typename T>
class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
public:
  BatchNormOp(const std::string &type, const VariableNameMap &inputs,
              const VariableNameMap &outputs,
              const framework::AttributeMap attrs,
              std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
-                                                  attrs, scope),
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  void Run() const {
@@ -44,7 +44,7 @@ class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

protected:
  BatchNormParam param_;
};
...
@@ -26,13 +26,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
public:
  ConcatOp(const std::string &type, const VariableNameMap &inputs,
-           const VariableNameMap &outputs,
-           const framework::AttributeMap attrs,
+           const VariableNameMap &outputs, const framework::AttributeMap attrs,
           std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
-                                                  attrs, scope),
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  void Run() const {
@@ -43,7 +42,7 @@ class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

protected:
  ConcatParam param_;
};
...
@@ -60,9 +60,9 @@ void ConvOp<Dtype, T>::InferShape() const {
  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
  for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(ConvOutputSize(in_dims[i + 2],
-                                          filter_dims[i + 2], dilations[i],
-                                          paddings[i], strides[i]));
+    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
+                                          dilations[i], paddings[i],
+                                          strides[i]));
  }

  framework::DDim ddim = framework::make_ddim(output_shape);
...
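For reference, ConvOutputSize is expected to implement the usual convolution output-size arithmetic; a hedged sketch of what each push_back above computes (not copied from this commit):

  // output = (input + 2 * padding - dilated_filter) / stride + 1
  inline int ConvOutputSizeSketch(int input, int filter, int dilation,
                                  int padding, int stride) {
    int dilated_filter = dilation * (filter - 1) + 1;
    return (input + 2 * padding - dilated_filter) / stride + 1;
  }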
@@ -28,12 +28,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public:
  ConvOp(const std::string &type, const VariableNameMap &inputs,
         const VariableNameMap &outputs, const framework::AttributeMap &attrs,
         std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
-                                                  attrs, scope),
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
@@ -45,7 +45,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
    this->ClearVariables({"Filter", "Input"});
  }

private:
  ConvParam param_;
};
...
@@ -27,13 +27,13 @@ using namespace framework;
template <typename DeviceType, typename T>
class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
public:
  ElementwiseAddOp(const std::string &type, const VariableNameMap &inputs,
                   const VariableNameMap &outputs,
                   const framework::AttributeMap attrs,
                   std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
-                                                  attrs, scope),
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  void Run() const {
@@ -44,7 +44,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

protected:
  ElementwiseAddParam param_;
};
} // namespace operators
...
@@ -67,15 +67,14 @@ void BatchNormKernel<CPU, float>::Compute(const BatchNormParam &param) const {
  /// (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
  for (int i = 0; i < C; i++) {
    new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i];
-    new_bias_ptr[i] =
-        bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
+    new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
    {
      for (int n = 0; n < N; n++) {
        for (int h = 0; h < H; h++) {
          for (int w = 0; w < W; w++) {
            int index = n * stride0 + i * stride1 + h * stride2 + w;
-            out_ptr[index] = input_x_ptr[index] * new_scale_ptr[i] +
-                             new_bias_ptr[i];
+            out_ptr[index] =
+                input_x_ptr[index] * new_scale_ptr[i] + new_bias_ptr[i];
          }
        }
      }
...
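The folding in this loop relies on the batch-norm identity hinted at in the /// comment above; spelled out as a sketch, with inv_std = 1 / sqrt(var + epsilon):

  // out = scale * (x - mean) * inv_std + bias
  //     = x * (scale * inv_std) + (bias - mean * scale * inv_std)
  //     = x * new_scale         + new_bias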
@@ -19,7 +19,7 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
template <typename T> class ConcatFunctor {
public:
  void operator()(const std::vector<framework::Tensor> &input, const int axis,
                  framework::Tensor *output) {
    size_t num = input.size();
@@ -80,8 +80,7 @@ void StridedNumelCopyWithAxis(int64_t axis, T *dst,
  }

  for (int64_t i = 0; i < before; ++i) {
-    memory::Copy(dst + i * dst_after, src + i * src_after,
-                 sizeof(T) * size);
+    memory::Copy(dst + i * dst_after, src + i * src_after, sizeof(T) * size);
  }
}
@@ -98,9 +97,9 @@ void ConcatKernel<CPU, float>::Compute(const ConcatParam &param) const {
    for (auto *in : inputs) {
      auto in_stride = framework::stride_numel(in->dims());
      auto out_stride = framework::stride_numel(out->dims());
-      StridedNumelCopyWithAxis<float>(
-          axis, out->data<float>() + output_offset, out_stride,
-          in->data<float>(), in_stride, in_stride[axis]);
+      StridedNumelCopyWithAxis<float>(axis, out->data<float>() + output_offset,
+                                      out_stride, in->data<float>(), in_stride,
+                                      in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
...
@@ -138,12 +138,10 @@ template <> void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
      }

      // gemm
-      Tensor out_slice =
-          out_batch.Slice(g * out_step, (g + 1) * out_step);
-      Tensor filter_slice =
-          filter.Slice(g * out_step, (g + 1) * out_step);
-      math::matmul<float>(filter_slice, false, col_matrix, false,
-                          float(1.0), &out_slice, float(0.0));
+      Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
+      Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
+      math::matmul<float>(filter_slice, false, col_matrix, false, float(1.0),
+                          &out_slice, float(0.0));
    }
  }
}
...
@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class BatchNormKernel
    : public framework::OpKernelBase<DeviceType, BatchNormParam> {
public:
  void Compute(const BatchNormParam &param) const;
};
...
@@ -26,7 +26,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConcatKernel : public framework::OpKernelBase<DeviceType, ConcatParam> {
public:
  void Compute(const ConcatParam &param) const;
};
...
@@ -31,7 +31,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConvKernel : public framework::OpKernelBase<DeviceType, ConvParam> {
public:
  void Compute(const ConvParam &param) const;
};
} // namespace operators
...
@@ -29,7 +29,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ElementwiseAddKernel
    : public framework::OpKernelBase<DeviceType, ElementwiseAddParam> {
public:
  void Compute(const ElementwiseAddParam &param) const;
};
} // namespace operators
...
@@ -26,8 +26,8 @@ namespace operators {
using namespace framework;

template <typename T> struct LRNFunctor {
-  void operator()(const framework::Tensor &input, framework::Tensor *out,
-                  int N, int C, int H, int W, int n, T k, T alpha, T beta) {
+  void operator()(const framework::Tensor &input, framework::Tensor *out, int N,
+                  int C, int H, int W, int n, T k, T alpha, T beta) {
    auto input_ptr = input.data<T>();
    const int start = -(n - 1) / 2;
    const int end = start + n;
@@ -47,14 +47,11 @@ template <typename T> struct LRNFunctor {
        if (channel >= 0 && channel < C) {
          for (int c = 0; c < H; c++) {
            for (int d = 0; d < W; d++) {
-              int u =
-                  a * stride0 + b * stride1 + c * stride2 + d;
-              int i = a * stride0 + channel * stride1 +
-                      c * stride2 + d;
-              sqr_buffer_ptr[u] +=
-                  alpha * input_ptr[i] * input_ptr[i];
+              int u = a * stride0 + b * stride1 + c * stride2 + d;
+              int i = a * stride0 + channel * stride1 + c * stride2 + d;
+              sqr_buffer_ptr[u] += alpha * input_ptr[i] * input_ptr[i];
            }
          }
        }
@@ -70,7 +67,7 @@ template <typename T> struct LRNFunctor {
template <typename DeviceType, typename T>
class LrnKernel : public framework::OpKernelBase<DeviceType, LrnParam> {
public:
  void Compute(const LrnParam &param) const;
};
} // namespace operators
...
@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> {
public:
  void Compute(const MulParam &param) const;
};
} // namespace operators
...
@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class PoolKernel : public framework::OpKernelBase<DeviceType, PoolParam> {
public:
  void Compute(const PoolParam &param) const;
};
} // namespace operators
...
@@ -27,12 +27,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class LrnOp : public framework::OperatorWithKernel<DeviceType> {
public:
  LrnOp(const std::string &type, const VariableNameMap &inputs,
        const VariableNameMap &outputs, const framework::AttributeMap attrs,
        std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
-                                                  attrs, scope),
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
+                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  void Run() const {
@@ -43,7 +43,7 @@ class LrnOp : public framework::OperatorWithKernel<DeviceType> {
  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

protected:
  LrnParam param_;
};
...
@@ -69,7 +69,7 @@ inline void trim_trailing_singular_dims(framework::DDim *dims) {
}

template <typename T> class RowwiseTransformIterator {
public:
  RowwiseTransformIterator(const T *ptr, int n) : ptr_(ptr), i_(0), n_(n) {}

  RowwiseTransformIterator<T> &operator++() {
@@ -90,7 +90,7 @@ template <typename T> class RowwiseTransformIterator {
  const T &operator*() { return ptr_[i_]; }

private:
  const T *ptr_;
  int i_;
  int64_t n_;
@@ -101,7 +101,7 @@ template <typename T> class RowwiseTransformIterator {
/// in (4,20,2) is 2 ,
/// (20,1) move 1 stride , to fill(add) 2 element with the same number.
template <typename T> class MidWiseTransformIterator {
public:
  MidWiseTransformIterator(const T *ptr, int n, int post)
      : ptr_(ptr), i_(0), j_(0), n_(n), post_(post) {}
@@ -127,7 +127,7 @@ template <typename T> class MidWiseTransformIterator {
  const T &operator*() { return ptr_[i_]; }

private:
  const T *ptr_;
  int64_t i_;
  int64_t j_;
@@ -137,7 +137,7 @@ template <typename T> class MidWiseTransformIterator {
template <typename Functor, typename T, typename OutType = T>
class TransformFunctor {
public:
  TransformFunctor(const framework::Tensor *x, const framework::Tensor *y,
                   framework::Tensor *z, Functor func)
      : x_(x->data<T>()), y_(y->data<T>()), z_(z->mutable_data<OutType>()),
@@ -156,11 +156,10 @@ class TransformFunctor {
  inline void RunMidWise(int n, int pre, int post) const {
    math::Transform trans;
-    trans(x_, x_ + nx_, MidWiseTransformIterator<T>(y_, n, post), z_,
-          func_);
+    trans(x_, x_ + nx_, MidWiseTransformIterator<T>(y_, n, post), z_, func_);
  }

private:
  const T *x_;
  const T *y_;
  OutType *z_;
...
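To make the /// comments above concrete, a hedged sketch of the broadcast that MidWiseTransformIterator performs (the shapes are the ones the comments mention; the loop is illustrative, not library code):

  // x: shape (4, 20, 2) -> pre = 4, n = 20, post = 2;  y: shape (20).
  // Each y[j] is reused for post consecutive x elements, and y is swept pre times:
  for (int p = 0; p < pre; ++p)
    for (int j = 0; j < n; ++j)
      for (int q = 0; q < post; ++q)
        z[(p * n + j) * post + q] = func(x[(p * n + j) * post + q], y[j]);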
@@ -26,9 +26,8 @@ namespace math {
 * output_width]
 */
template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public:
-  void operator()(const framework::Tensor &im,
-                  const std::vector<int> &dilation,
+  void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
                  const std::vector<int> &stride,
                  const std::vector<int> &padding, framework::Tensor *col) {
    // PADDLE_ENFORCE(im.dims().size() == 3);
@@ -72,17 +71,13 @@ template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
      int h_offset = (c / filter_width) % filter_height;
      int c_im = c / (filter_width * filter_height);
      for (int h = 0; h < col_height; ++h) {
-        int im_row_idx =
-            h * stride[0] - padding[0] + h_offset * dilation[0];
+        int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
        for (int w = 0; w < col_width; ++w) {
-          int im_col_idx =
-              w * stride[1] - padding[1] + w_offset * dilation[1];
+          int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
          int col_idx = (c * col_height + h) * col_width + w;
-          int im_idx =
-              (im_row_idx + c_im * im_height) * im_width + im_col_idx;
-          col_data[col_idx] =
-              (im_row_idx < 0 || im_row_idx >= im_height ||
+          int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx;
+          col_data[col_idx] = (im_row_idx < 0 || im_row_idx >= im_height ||
               im_col_idx < 0 || im_col_idx >= im_width)
                  ? static_cast<T>(0)
                  : im_data[im_idx];
@@ -99,7 +94,7 @@ template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
 * output_width]
 */
template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public:
  void operator()(const framework::Tensor &col,
                  const std::vector<int> &dilation,
                  const std::vector<int> &stride,
@@ -145,15 +140,12 @@ template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
      int h_offset = (c / filter_width) % filter_height;
      int c_im = c / (filter_width * filter_height);
      for (int h = 0; h < col_height; ++h) {
-        int im_row_idx =
-            h * stride[0] - padding[0] + h_offset * dilation[0];
+        int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
        for (int w = 0; w < col_width; ++w) {
-          int im_col_idx =
-              w * stride[1] - padding[1] + w_offset * dilation[1];
+          int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
          if ((im_row_idx) >= 0 && (im_row_idx) < im_height &&
              (im_col_idx) >= 0 && (im_col_idx) < im_width) {
-            im_data[(im_row_idx + c_im * im_height) * im_width +
-                    im_col_idx] +=
+            im_data[(im_row_idx + c_im * im_height) * im_width + im_col_idx] +=
                col_data[(c * col_height + h) * col_width + w];
          }
        }
@@ -174,9 +166,8 @@ template class Col2ImFunctor<ColFormat::kCFO, CPU, double>;
 * filter_width]
 */
template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public:
-  void operator()(const framework::Tensor &im,
-                  const std::vector<int> &dilation,
+  void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
                  const std::vector<int> &stride,
                  const std::vector<int> &padding, framework::Tensor *col) {
    // PADDLE_ENFORCE(im.dims().size() == 3);
@@ -210,29 +201,25 @@ template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
        for (int channel = 0; channel < im_channels; ++channel) {
          for (int filter_row_idx = 0; filter_row_idx < filter_height;
               ++filter_row_idx) {
-            int im_row_offset = col_row_idx * stride[0] +
-                                filter_row_idx - padding[0];
-            for (int filter_col_idx = 0;
-                 filter_col_idx < filter_width; ++filter_col_idx) {
-              int im_col_offset = col_col_idx * stride[1] +
-                                  filter_col_idx - padding[1];
+            int im_row_offset =
+                col_row_idx * stride[0] + filter_row_idx - padding[0];
+            for (int filter_col_idx = 0; filter_col_idx < filter_width;
+                 ++filter_col_idx) {
+              int im_col_offset =
+                  col_col_idx * stride[1] + filter_col_idx - padding[1];
              int col_offset =
-                  ((((col_row_idx)*col_width + col_col_idx) *
-                    im_channels +
+                  ((((col_row_idx)*col_width + col_col_idx) * im_channels +
                    channel) *
                       filter_height +
                   filter_row_idx) *
                      filter_width +
                  filter_col_idx;
-              int im_offset =
-                  (channel * im_height + im_row_offset) *
-                      im_width +
+              int im_offset = (channel * im_height + im_row_offset) * im_width +
                  im_col_offset;
              col_data[col_offset] =
-                  (im_row_offset < 0 ||
-                   im_row_offset >= im_height ||
+                  (im_row_offset < 0 || im_row_offset >= im_height ||
                   im_col_offset < 0 || im_col_offset >= im_width)
                      ? static_cast<T>(0)
                      : im_data[im_offset];
@@ -251,7 +238,7 @@ template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
 * filter_width]
 */
template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public:
  void operator()(const framework::Tensor &col,
                  const std::vector<int> &dilation,
                  const std::vector<int> &stride,
@@ -287,29 +274,25 @@ template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
        for (int channel = 0; channel < im_channels; ++channel) {
          for (int filter_row_idx = 0; filter_row_idx < filter_height;
               ++filter_row_idx) {
-            int im_row_offset = col_row_idx * stride[0] +
-                                filter_row_idx - padding[0];
-            for (int filter_col_idx = 0;
-                 filter_col_idx < filter_width; ++filter_col_idx) {
-              int im_col_offset = col_col_idx * stride[1] +
-                                  filter_col_idx - padding[1];
+            int im_row_offset =
+                col_row_idx * stride[0] + filter_row_idx - padding[0];
+            for (int filter_col_idx = 0; filter_col_idx < filter_width;
+                 ++filter_col_idx) {
+              int im_col_offset =
+                  col_col_idx * stride[1] + filter_col_idx - padding[1];
              int col_offset =
-                  (((col_row_idx * col_width + col_col_idx) *
-                    im_channels +
+                  (((col_row_idx * col_width + col_col_idx) * im_channels +
                    channel) *
                       filter_height +
                   filter_row_idx) *
                      filter_width +
                  filter_col_idx;
-              if (im_row_offset >= 0 &&
-                  im_row_offset < im_height &&
-                  im_col_offset >= 0 &&
-                  im_col_offset < im_width) {
+              if (im_row_offset >= 0 && im_row_offset < im_height &&
+                  im_col_offset >= 0 && im_col_offset < im_width) {
                int im_offset =
-                    (channel * im_height + im_row_offset) *
-                        im_width +
+                    (channel * im_height + im_row_offset) * im_width +
                    im_col_offset;
                im_data[im_offset] += col_data[col_offset];
              }
...
@@ -89,16 +89,15 @@ enum class ColFormat { kCFO = 0, kOCF = 1 };
 */
template <ColFormat Format, typename DeviceType, typename T>
class Im2ColFunctor {
public:
  void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
                  const std::vector<int> &stride,
                  const std::vector<int> &padding, framework::Tensor *col);
};

template <ColFormat Format, typename DeviceType, typename T>
class Col2ImFunctor {
public:
  void operator()(const framework::Tensor &col,
                  const std::vector<int> &dilation,
                  const std::vector<int> &stride,
...
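The two declarations above are the whole public surface of im2col/col2im: pick a layout via ColFormat, a device tag, and an element type, then invoke the functor. A minimal usage sketch, assuming this repo's math namespace and caller-filled tensors (shapes and values here are illustrative, not from this commit):

// Hypothetical fragment.
framework::Tensor im;   // caller fills [channels, height, width]
framework::Tensor col;  // kOCF layout: [out_h, out_w, channels, f_h, f_w]
std::vector<int> dilation = {1, 1}, stride = {1, 1}, padding = {0, 0};
math::Im2ColFunctor<math::ColFormat::kOCF, CPU, float> im2col;
im2col(im, dilation, stride, padding, &col);  // unfold image into columns
math::Col2ImFunctor<math::ColFormat::kOCF, CPU, float> col2im;
col2im(col, dilation, stride, padding, &im);  // fold back, accumulating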
@@ -30,9 +30,8 @@ namespace math {
 */
template <typename PoolProcess, typename T>
class PoolFunctor<CPU, PoolProcess, T> {
public:
  void operator()(const framework::Tensor &input, const std::vector<int> &ksize,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, PoolProcess pool_process,
                  framework::Tensor *output) {
@@ -77,8 +76,7 @@ class PoolFunctor<CPU, PoolProcess, T> {
          T ele = pool_process.initial();
          for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
              pool_process.compute(input_data[h * input_width + w], &ele);
            }
          }
          int pool_size = (hend - hstart) * (wend - wstart);
...
@@ -38,7 +38,7 @@ namespace math {
 * MaxPoolGrad and AvgPoolGrad are gradient operations respectively.
 */
template <class T> class MaxPool {
public:
  inline T initial() { return static_cast<T>(-FLT_MAX); }
  inline void compute(const T &x, T *y) { *y = *y > x ? *y : x; }
@@ -47,7 +47,7 @@ template <class T> class MaxPool {
};

template <class T> class AvgPool {
public:
  inline T initial() { return static_cast<T>(0); }
  inline void compute(const T &x, T *y) { *y += x; }
@@ -57,9 +57,8 @@ template <class T> class AvgPool {
template <typename DeviceType, typename PoolProcess, typename T>
class PoolFunctor {
public:
  void operator()(const framework::Tensor &input, const std::vector<int> &ksize,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, PoolProcess pool_compute,
                  framework::Tensor *output);
...
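MaxPool and AvgPool are the PoolProcess policies that keep PoolFunctor generic: initial() seeds the running value and compute() folds one window element into it, so the functor never hard-codes the reduction. A standalone sketch of that protocol (the helper and its values are illustrative):

#include <cfloat>
// Reduce one 4-element pooling window with a PoolProcess-style policy.
template <class Pool> float reduce_window(const float (&win)[4], Pool pool) {
  float ele = pool.initial();  // -FLT_MAX for max, 0 for average
  for (float v : win) {
    pool.compute(v, &ele);     // fold element into the accumulator
  }
  return ele;                  // AvgPool callers still divide by pool_size
}
// reduce_window(win, MaxPool<float>{})  -> window maximum
// reduce_window(win, AvgPool<float>{})  -> window sum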
@@ -26,7 +26,7 @@ using Tensor = paddle_mobile::framework::Tensor;
 * output_depth, output_height, output_width]
 */
template <typename T> class Vol2ColFunctor<CPU, T> {
public:
  void operator()(const Tensor &vol, const std::vector<int> &dilations,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, Tensor *col) const {
@@ -81,28 +81,21 @@ template <typename T> class Vol2ColFunctor<CPU, T> {
      int d_offset = (c / filter_width / filter_height) % filter_depth;
      int c_in = c / filter_width / filter_height / filter_depth;
      for (int d = 0; d < output_depth; ++d) {
        int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
        for (int h = 0; h < output_height; ++h) {
          int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
          for (int w = 0; w < output_width; ++w) {
            int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
            int col_idx =
                ((c * output_depth + d) * output_height + h) * output_width + w;
            int vol_idx =
                ((c_in * input_depth + d_pad) * input_height + h_pad) *
                    input_width +
                w_pad;
            col_data[col_idx] =
                (h_pad < 0 || h_pad >= input_height || w_pad < 0 ||
                 w_pad >= input_width || d_pad < 0 || d_pad >= input_depth)
                    ? static_cast<T>(0)
                    : vol_data[vol_idx];
          }
@@ -119,7 +112,7 @@ template <typename T> class Vol2ColFunctor<CPU, T> {
 * output_depth, output_height, output_width]
 */
template <typename T> class Col2VolFunctor<CPU, T> {
public:
  void operator()(const Tensor &col, const std::vector<int> &dilations,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, Tensor *vol) const {
@@ -173,27 +166,21 @@ template <typename T> class Col2VolFunctor<CPU, T> {
      int d_offset = (c / filter_width / filter_height) % filter_depth;
      int cIm = c / filter_width / filter_height / filter_depth;
      for (int d = 0; d < output_depth; ++d) {
        int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
        for (int h = 0; h < output_height; ++h) {
          int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
          for (int w = 0; w < output_width; ++w) {
            int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
            if (h_pad >= 0 && h_pad < input_height && w_pad >= 0 &&
                w_pad < input_width && d_pad >= 0 && d_pad < input_depth) {
              int vol_idx =
                  ((cIm * input_depth + d_pad) * input_height + h_pad) *
                      input_width +
                  w_pad;
              int col_idx =
                  ((c * output_depth + d) * output_height + h) * output_width +
                  w;
              vol_data[vol_idx] += col_data[col_idx];
            }
...
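The index arithmetic above decodes each column channel c into (c_in, d_offset, h_offset, w_offset), then maps an output coordinate to a padded input coordinate via x_pad = x * stride - padding + offset * dilation. A worked instance with illustrative numbers:

// Illustrative values, not from the repo's tests.
int c = 7, filter_width = 3, filter_height = 3, filter_depth = 3;
int w_offset = c % filter_width;                                   // 1
int h_offset = (c / filter_width) % filter_height;                 // 2
int d_offset = (c / filter_width / filter_height) % filter_depth;  // 0
int c_in = c / filter_width / filter_height / filter_depth;        // 0
// With strides {2,2,2}, paddings {1,1,1}, dilations {1,1,1}, d = 3:
// d_pad = 3 * 2 - 1 + 0 * 1 = 5 -> reads depth slice 5 of input channel
// c_in, or stores static_cast<T>(0) when 5 >= input_depth.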
@@ -73,14 +73,14 @@ namespace math {
using Tensor = paddle_mobile::framework::Tensor;

template <typename DeviceType, typename T> class Vol2ColFunctor {
public:
  void operator()(const Tensor &vol, const std::vector<int> &dilations,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, Tensor *col) const;
};

template <typename DeviceType, typename T> class Col2VolFunctor {
public:
  void operator()(const Tensor &col, const std::vector<int> &dilations,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, Tensor *vol) const;
...
@@ -27,12 +27,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> {
public:
  MulOp(const std::string &type, const VariableNameMap &inputs,
        const VariableNameMap &outputs, const framework::AttributeMap attrs,
        std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  void Run() const {
@@ -43,7 +43,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> {
  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

protected:
  MulParam param_;
};
...
@@ -24,8 +24,7 @@ Print &operator<<(Print &printer, const ConvParam &conv_param) {
  printer << "parameter of conv: "
          << "\n";
  printer << " stride: "
          << " (" << conv_param.Strides()[0] << conv_param.Strides()[1] << ") "
          << "\n";
  printer << " paddings: "
          << " (" << conv_param.Paddings()[0] << conv_param.Paddings()[1]
...
@@ -31,8 +31,8 @@ namespace operators {
using namespace framework;

class OpParam : PaddleMobileObject {
public:
protected:
  template <typename T>
  static T *InputFrom(const VariableNameMap &inputs, const Scope &scope) {
    return GetVarValue<T>("Input", inputs, scope);
@@ -62,8 +62,7 @@ class OpParam : PaddleMobileObject {
    return GetVarValue<T>("Mean", inputs, scope);
  }

  template <typename T>
  static T *InputScaleFrom(const VariableNameMap &inputs, const Scope &scope) {
    return GetVarValue<T>("Scale", inputs, scope);
  }
@@ -104,8 +103,8 @@ class OpParam : PaddleMobileObject {
  }

  template <typename T>
  static T *GetVarValue(const std::string &key, const VariableNameMap &var_map,
                        const Scope &scope) {
    auto var_vec = var_map.at(key);
    if (!var_vec.empty()) {
      // std::cout << " get var value -- " << var_vec[0] <<
@@ -133,7 +132,7 @@ class OpParam : PaddleMobileObject {
};

class ConvParam : OpParam {
public:
  ConvParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
            const framework::AttributeMap &attrs,
            const framework::Scope &scope) {
@@ -160,7 +159,7 @@ class ConvParam : OpParam {
  const int &Groups() const { return groups; }

private:
  Tensor *input_;
  Tensor *output_;
  LoDTensor *filter_;
@@ -173,7 +172,7 @@ class ConvParam : OpParam {
Print &operator<<(Print &printer, const ConvParam &conv_param);

class ElementwiseAddParam : OpParam {
public:
  ElementwiseAddParam(const VariableNameMap &inputs,
                      const VariableNameMap &outputs,
                      const framework::AttributeMap &attrs,
@@ -192,7 +191,7 @@ class ElementwiseAddParam : OpParam {
  const int &Axis() const { return axis_; }

private:
  Tensor *input_x_;
  Tensor *input_y_;
  Tensor *out_;
@@ -200,7 +199,7 @@ class ElementwiseAddParam : OpParam {
};

class MulParam : OpParam {
public:
  MulParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
           const framework::AttributeMap &attrs,
           const framework::Scope &scope) {
@@ -221,7 +220,7 @@ class MulParam : OpParam {
  const int &YNumColDims() const { return y_num_col_dims_; }

private:
  Tensor *input_x_;
  Tensor *input_y_;
  Tensor *out_;
@@ -230,7 +229,7 @@ class MulParam : OpParam {
};

class ConcatParam : public OpParam {
public:
  ConcatParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
              const framework::AttributeMap &attrs,
              const framework::Scope &scope) {
@@ -245,14 +244,14 @@ class ConcatParam : public OpParam {
  const int &Axis() const { return axis_; }

private:
  std::vector<Tensor *> inputs_;
  Tensor *out_;
  int axis_;
};

class LrnParam : public OpParam {
public:
  LrnParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
           const framework::AttributeMap &attrs,
           const framework::Scope &scope) {
@@ -282,7 +281,7 @@ class LrnParam : public OpParam {
  const std::string &DataFormat() const { return data_format_; }

private:
  Tensor *input_x_;
  Tensor *out_;
  Tensor *mid_out_;
@@ -293,9 +292,8 @@ class LrnParam : public OpParam {
  std::string data_format_;
};

class BatchNormParam : OpParam {
public:
  BatchNormParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                 const framework::AttributeMap &attrs,
                 const framework::Scope &scope) {
    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
@@ -329,7 +327,7 @@ class BatchNormParam : OpParam {
  const std::string &DataFormat() const { return data_format_; }

private:
  Tensor *input_x_;
  Tensor *output_y_;
  Tensor *input_bias_;
@@ -342,7 +340,7 @@ class BatchNormParam : OpParam {
  std::string data_format_;
};

class PoolParam : public OpParam {
public:
  PoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
            const framework::AttributeMap &attrs,
            const framework::Scope &scope) {
@@ -373,7 +371,7 @@ class PoolParam : public OpParam {
  bool isGlobalPooling() const { return gloabal_pooling_; }

private:
  Tensor *input_;
  Tensor *output_;
  std::string pooling_type_;
...
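Each *Param class above repeats one recipe: resolve typed pointers out of the Scope in the constructor via OpParam's static helpers, expose them through const getters, and keep the raw pointers private. A hypothetical ReluParam in the same mold (ReluParam itself is illustrative and not part of this commit; InputXFrom and GetVarValue do appear in the hunks above):

class ReluParam : public OpParam {
public:
  ReluParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
            const framework::AttributeMap &attrs,
            const framework::Scope &scope) {
    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
    out_ = GetVarValue<framework::Tensor>("Out", outputs, scope);
  }

  const Tensor *InputX() const { return input_x_; }
  Tensor *Out() const { return out_; }

private:
  Tensor *input_x_;
  Tensor *out_;
};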
@@ -49,8 +49,8 @@ void PoolOp<DeviceType, T>::InferShape() const {
  }
  std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
  for (size_t i = 0; i < ksize.size(); ++i) {
    output_shape.push_back(PoolOutputSize(in_x_dims[i + 2], ksize[i],
                                          paddings[i], strides[i], ceil_mode));
  }
  param_.Output()->Resize(framework::make_ddim(output_shape));
  DLOG << "infer shape out size =" << param_.Output()->numel();
...
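PoolOutputSize itself is outside this hunk; under the usual pooling convention it computes, per dimension, (input - ksize + 2 * padding) / stride + 1, with ceil_mode rounding the division up. A sketch of that assumed helper, not quoted from the repo:

int PoolOutputSize(int input_size, int filter_size, int padding, int stride,
                   bool ceil_mode) {
  if (!ceil_mode) {
    return (input_size - filter_size + 2 * padding) / stride + 1;
  }
  // Rounding up: add stride - 1 before the integer division.
  return (input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
}
// e.g. input 32, filter 2, padding 0, stride 2 -> (32 - 2) / 2 + 1 = 16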
@@ -28,12 +28,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class PoolOp : public framework::OperatorWithKernel<DeviceType> {
public:
  PoolOp(const std::string &type, const VariableNameMap &inputs,
         const VariableNameMap &outputs, const framework::AttributeMap &attrs,
         std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;
@@ -45,7 +45,7 @@ class PoolOp : public framework::OperatorWithKernel<DeviceType> {
    this->ClearVariables({"X"});
  }

private:
  PoolParam param_;
};
} // namespace operators
...
@@ -17,7 +17,7 @@ limitations under the License. */
// Disable the copy and assignment operator for a class.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname)                                     \
private:                                                                       \
  classname(const classname &) = delete;                                       \
  classname(classname &&) = delete;                                            \
  classname &operator=(const classname &) = delete;                            \
...
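Because the macro expands to a private: section full of deleted members, it is meant to sit at the end of a class body. A minimal sketch of its use (Widget is an illustrative class, not from the repo):

#include <utility>

class Widget {
public:
  Widget() = default;

  DISABLE_COPY_AND_ASSIGN(Widget);
};
// Widget a;
// Widget b(a);             // error: copy constructor is deleted
// Widget c(std::move(a));  // error: move constructor is deleted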
@@ -38,8 +38,8 @@ Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
      std::shared_ptr<OpDesc> op = ops[j];
      if (op->Type() == op_type) {
        std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
            op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
            this->program_.scope);
        this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
        break;
...
@@ -27,7 +27,7 @@ using namespace paddle_mobile::framework;
template <typename DeviceType, typename OpType>
class Executor4Test : public Executor<DeviceType> {
public:
  Executor4Test(const Program<DeviceType> p, std::string op_type);

  std::shared_ptr<Tensor> predict(Tensor &t, std::string input,
...
@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {

template <typename Dtype> class TestBatchNormOp {
public:
  explicit TestBatchNormOp(const Program<Dtype> p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
@@ -52,8 +52,7 @@ template <typename Dtype> class TestBatchNormOp {
        DLOG << " Input Scale is : " << op->Input("Scale")[0];
        DLOG << " Input Bias is : " << op->Input("Bias")[0];
        DLOG << " Output Y is : " << op->Output("Y")[0];
        DLOG << " epsilon : " << op->GetAttrMap().at("epsilon").Get<float>();
        std::shared_ptr<operators::BatchNormOp<Dtype, float>> lrn =
            std::make_shared<operators::BatchNormOp<Dtype, float>>(
                op->Type(), op->GetInputs(), op->GetOutputs(),
@@ -101,7 +100,7 @@ template <typename Dtype> class TestBatchNormOp {
    return out_tensor;
  }

private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
@@ -113,8 +112,7 @@ template <typename Dtype> class TestBatchNormOp {
                  const Tensor &t4, const Tensor &t5, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
@@ -140,8 +138,7 @@ int main() {
  auto *inputx1_ptr = inputx1.data<float>();

  paddle_mobile::framework::Tensor mean;
  SetupTensor<float>(&mean, {10}, static_cast<float>(0), static_cast<float>(1));
  auto *mean_ptr = mean.data<float>();

  paddle_mobile::framework::Tensor scale;
@@ -155,12 +152,11 @@ int main() {
  auto *variance_ptr = variance.data<float>();

  paddle_mobile::framework::Tensor bias;
  SetupTensor<float>(&bias, {10}, static_cast<float>(0), static_cast<float>(1));
  auto *bias_ptr = bias.data<float>();

  paddle_mobile::framework::TestBatchNormOp<paddle_mobile::CPU> testBatchNormOp(
      program);

  auto output_bn =
      testBatchNormOp.predict_bn(inputx1, mean, scale, variance, bias);
...
@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {

template <typename Dtype> class TestConcatOp {
public:
  explicit TestConcatOp(const Program<Dtype> p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
@@ -41,15 +41,13 @@ template <typename Dtype> class TestConcatOp {
    // DLOG << " ops " << ops.size();
    for (int j = 0; j < ops.size(); ++j) {
      std::shared_ptr<OpDesc> op = ops[j];
      if (op->Type() == "concat" && op->Input("X")[0] == "conv2d_3.tmp_1") {
        DLOG << " mul attr size: " << op->GetAttrMap().size();
        DLOG << " inputs size: " << op->GetInputs().size();
        DLOG << " outputs size: " << op->GetOutputs().size();
        DLOG << " Input X is : " << op->Input("X")[0];
        DLOG << " Output Out is : " << op->Output("Out")[0];
        DLOG << " axis : " << op->GetAttrMap().at("axis").Get<int>();

        std::shared_ptr<operators::ConcatOp<Dtype, float>> concat =
            std::make_shared<operators::ConcatOp<Dtype, float>>(
@@ -94,7 +92,7 @@ template <typename Dtype> class TestConcatOp {
    return out_tensor;
  }

private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
@@ -106,8 +104,7 @@ template <typename Dtype> class TestConcatOp {
                  const Tensor &t4, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
@@ -168,8 +165,7 @@ int main() {
  /// output (4,100,2,2)
  int input_index =
      input_n * stride0 + input_c * stride1 + input_h * stride2 + input_w;
  int output_index = input_n * 100 * 2 * 2 +
                     (input_c + inputx1.dims()[1] + inputx2.dims()[1]) * 2 * 2 +
                     input_h * 2 + input_w;
...
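The check above verifies concatenation along axis 1 into a (4, 100, 2, 2) output: the channel index of the third input is shifted past the channels of inputx1 and inputx2 before being flattened. Worked with illustrative channel counts (the test reads the real ones from the tensors' dims):

// Illustrative values only.
int n = 1, c = 5, h = 1, w = 0;  // element of the third concat input
int c1 = 10, c2 = 20;            // channels of the first two inputs
int c_out = c + c1 + c2;         // 35: shifted past the earlier inputs
int offset = n * 100 * 2 * 2 + c_out * 2 * 2 + h * 2 + w;  // 400 + 140 + 2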
This diff is collapsed.
@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {

template <typename Dtype> class TestMulOp {
public:
  explicit TestMulOp(const Program<Dtype> p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
@@ -41,8 +41,7 @@ template <typename Dtype> class TestMulOp {
    // DLOG << " ops " << ops.size();
    for (int j = 0; j < ops.size(); ++j) {
      std::shared_ptr<OpDesc> op = ops[j];
      if (op->Type() == "mul" && op->Input("X")[0] == "pool2d_0.tmp_0") {
        DLOG << " mul attr size: " << op->GetAttrMap().size();
        DLOG << " inputs size: " << op->GetInputs().size();
        DLOG << " outputs size: " << op->GetOutputs().size();
@@ -88,7 +87,7 @@ template <typename Dtype> class TestMulOp {
    return out_tensor;
  }

private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
@@ -99,8 +98,7 @@ template <typename Dtype> class TestMulOp {
  void predict_mul(const Tensor &t1, const Tensor &t2, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
...
This diff is collapsed.