提交 8b8d7e08 编写于 作者: 朔-望's avatar 朔-望

add clang-tidy support

上级 17535d85
---
Checks: 'clang-diagnostic-*,clang-analyzer-*'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
User: allonli
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
...
...@@ -20,14 +20,14 @@ repos: ...@@ -20,14 +20,14 @@ repos:
- id: trailing-whitespace - id: trailing-whitespace
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$ files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
#- repo: local - repo: local
# hooks: hooks:
# - id: clang-format-with-version-check - id: clang-tidy
# name: clang-format name: clang-tidy
# description: Format files with ClangFormat. description: Format files with tidy.
# entry: bash ./tools/pre-commit.hooks/.clang_format.hook -i entry: bash ./tools/pre-commit.hooks/.clang-tidy.hook -i
# language: system language: system
# files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$ files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
# #
#- repo: local #- repo: local
# hooks: # hooks:
......
...@@ -27,7 +27,7 @@ SOFTWARE. ...@@ -27,7 +27,7 @@ SOFTWARE.
namespace paddle_mobile { namespace paddle_mobile {
enum LogLevel { enum LogLevel {
kNO_LOG, kNO_LOG,
kLOG_ERROR, kLOG_ERROR,
kLOG_WARNING, kLOG_WARNING,
...@@ -37,25 +37,25 @@ namespace paddle_mobile { ...@@ -37,25 +37,25 @@ namespace paddle_mobile {
kLOG_DEBUG2, kLOG_DEBUG2,
kLOG_DEBUG3, kLOG_DEBUG3,
kLOG_DEBUG4 kLOG_DEBUG4
}; };
// log level // log level
static LogLevel log_level = kLOG_DEBUG4; static LogLevel log_level = kLOG_DEBUG4;
static std::vector<std::string> logs{"NO", "ERROR ", "WARNING", static std::vector<std::string> logs{"NO", "ERROR ", "WARNING",
"INFO ", "DEBUG ", "DEBUG1 ", "INFO ", "DEBUG ", "DEBUG1 ",
"DEBUG2 ", "DEBUG3 ", "DEBUG4 "}; "DEBUG2 ", "DEBUG3 ", "DEBUG4 "};
struct ToLog; struct ToLog;
struct Print { struct Print {
friend struct ToLog; friend struct ToLog;
template <typename T> Print &operator<<(T const &value) { template<typename T> Print &operator<<(T const &value) {
buffer_ << value; buffer_ << value;
return *this; return *this;
} }
private: private:
void print(LogLevel level) { void print(LogLevel level) {
buffer_ << std::endl; buffer_ << std::endl;
if (level == kLOG_ERROR) { if (level == kLOG_ERROR) {
...@@ -65,28 +65,28 @@ namespace paddle_mobile { ...@@ -65,28 +65,28 @@ namespace paddle_mobile {
} }
} }
std::ostringstream buffer_; std::ostringstream buffer_;
}; };
struct ToLog { struct ToLog {
ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "") ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "")
: level_(level) { : level_(level) {
unsigned blanks = unsigned blanks =
(unsigned)(level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1); (unsigned) (level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
printer_ << logs[level] << " " << info << ":" printer_ << logs[level] << " " << info << ":"
<< std::string(blanks, ' '); << std::string(blanks, ' ');
} }
template <typename T> ToLog &operator<<(T const &value) { template<typename T> ToLog &operator<<(T const &value) {
printer_ << value; printer_ << value;
return *this; return *this;
} }
~ToLog() { printer_.print(level_); } ~ToLog() { printer_.print(level_); }
private: private:
LogLevel level_; LogLevel level_;
Print printer_; Print printer_;
}; };
#define LOG(level) \ #define LOG(level) \
if (level > paddle_mobile::log_level) { \ if (level > paddle_mobile::log_level) { \
......
...@@ -23,31 +23,31 @@ SOFTWARE. ...@@ -23,31 +23,31 @@ SOFTWARE.
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
template <typename Dtype> class OperatorBase; template<typename Dtype> class OperatorBase;
class OpDesc; class OpDesc;
class BlockDesc; class BlockDesc;
class InferShapeContext; class InferShapeContext;
} }
using VariableNameMap = std::map<std::string, std::vector<std::string>>; using VariableNameMap = std::map<std::string, std::vector<std::string>>;
template <typename Dtype> template<typename Dtype>
using OpCreator = std::function<framework::OperatorBase<Dtype> *( using OpCreator = std::function<framework::OperatorBase<Dtype> *(
const std::string & /*type*/, const VariableNameMap & /*inputs*/, const std::string & /*type*/, const VariableNameMap & /*inputs*/,
const VariableNameMap & /*outputs*/, const VariableNameMap & /*outputs*/,
const framework::AttributeMap & /*attrs*/)>; const framework::AttributeMap & /*attrs*/)>;
using GradOpMakerFN = using GradOpMakerFN =
std::function<std::vector<std::unique_ptr<framework::OpDesc>>( std::function<std::vector<std::unique_ptr<framework::OpDesc>>(
const framework::OpDesc &, const framework::OpDesc &,
const std::unordered_set<std::string> & /*no_grad_set*/, const std::unordered_set<std::string> & /*no_grad_set*/,
std::unordered_map<std::string, std::string> * /*grad_to_var*/, std::unordered_map<std::string, std::string> * /*grad_to_var*/,
const std::vector<framework::BlockDesc *> &grad_block)>; const std::vector<framework::BlockDesc *> &grad_block)>;
using InferVarTypeFN = using InferVarTypeFN =
std::function<void(const framework::OpDesc & /*op_desc*/, std::function<void(const framework::OpDesc & /*op_desc*/,
framework::BlockDesc * /*block*/)>; framework::BlockDesc * /*block*/)>;
using InferShapeFN = std::function<void(framework::InferShapeContext *)>; using InferShapeFN = std::function<void(framework::InferShapeContext *)>;
}; };
...@@ -19,19 +19,19 @@ SOFTWARE. ...@@ -19,19 +19,19 @@ SOFTWARE.
#pragma once; #pragma once;
namespace paddle_mobile { namespace paddle_mobile {
enum class Precision : int { FP32 = 0 }; enum class Precision : int { FP32 = 0 };
//! device type //! device type
enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 }; enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
template <DeviceTypeEnum T> struct DeviceType {}; template<DeviceTypeEnum T> struct DeviceType {};
typedef DeviceType<kCPU> CPU; typedef DeviceType<kCPU> CPU;
typedef DeviceType<kFPGA> FPGA; typedef DeviceType<kFPGA> FPGA;
typedef DeviceType<kGPU_MALI> GPU_MALI; typedef DeviceType<kGPU_MALI> GPU_MALI;
//! data type //! data type
enum DataType { enum DataType {
PM_INVALID = -1, PM_INVALID = -1,
PM_HALF = 0, PM_HALF = 0,
PM_FLOAT = 1, PM_FLOAT = 1,
...@@ -47,9 +47,9 @@ namespace paddle_mobile { ...@@ -47,9 +47,9 @@ namespace paddle_mobile {
PM_BOOL = 11, PM_BOOL = 11,
PM_SHAPE = 12, PM_SHAPE = 12,
PM_TENSOR = 13 PM_TENSOR = 13
}; };
//! //!
enum PMStatus { enum PMStatus {
PMSuccess = 0xFF, /*!< No errors */ PMSuccess = 0xFF, /*!< No errors */
PMNotInitialized = 0x01, /*!< Data not initialized. */ PMNotInitialized = 0x01, /*!< Data not initialized. */
PMInvalidValue = 0x02, /*!< Incorrect variable value. */ PMInvalidValue = 0x02, /*!< Incorrect variable value. */
...@@ -59,5 +59,5 @@ namespace paddle_mobile { ...@@ -59,5 +59,5 @@ namespace paddle_mobile {
PMOutOfMem = 0x06, /*!< OOM error*/ PMOutOfMem = 0x06, /*!< OOM error*/
PMUnImplError = 0x07, /*!< Unimplement error. */ PMUnImplError = 0x07, /*!< Unimplement error. */
PMWrongDevice = 0x08 /*!< un-correct device. */ PMWrongDevice = 0x08 /*!< un-correct device. */
}; };
} }
...@@ -15,5 +15,3 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ...@@ -15,5 +15,3 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
==============================================================================*/ ==============================================================================*/
#include "variant.h"
...@@ -21,9 +21,9 @@ SOFTWARE. ...@@ -21,9 +21,9 @@ SOFTWARE.
#pragma once #pragma once
namespace paddle_mobile { namespace paddle_mobile {
template <int ID, typename Type> struct IDToType { typedef Type type_t; }; template<int ID, typename Type> struct IDToType { typedef Type type_t; };
template <typename F, typename... Ts> struct VariantHelper { template<typename F, typename... Ts> struct VariantHelper {
static const size_t size = sizeof(F) > VariantHelper<Ts...>::size static const size_t size = sizeof(F) > VariantHelper<Ts...>::size
? sizeof(F) ? sizeof(F)
: VariantHelper<Ts...>::size; : VariantHelper<Ts...>::size;
...@@ -35,9 +35,9 @@ namespace paddle_mobile { ...@@ -35,9 +35,9 @@ namespace paddle_mobile {
VariantHelper<Ts...>::Destroy(id, data); VariantHelper<Ts...>::Destroy(id, data);
} }
} }
}; };
template <typename F> struct VariantHelper<F> { template<typename F> struct VariantHelper<F> {
static const size_t size = sizeof(F); static const size_t size = sizeof(F);
inline static void Destroy(size_t id, void *data) { inline static void Destroy(size_t id, void *data) {
if (id == typeid(F).hash_code()) { if (id == typeid(F).hash_code()) {
...@@ -46,19 +46,19 @@ namespace paddle_mobile { ...@@ -46,19 +46,19 @@ namespace paddle_mobile {
// std::cout << "未匹配到 " << std::endl; // std::cout << "未匹配到 " << std::endl;
} }
} }
}; };
template <size_t size> class RawData { template<size_t size> class RawData {
public: public:
char data[size]; char data[size];
RawData() {} RawData() {}
RawData(const RawData &raw_data) { strcpy(data, raw_data.data); } RawData(const RawData &raw_data) { strcpy(data, raw_data.data); }
// void operator=(const RawData &raw_data){ // void operator=(const RawData &raw_data){
// strcpy(data, raw_data.data); // strcpy(data, raw_data.data);
// } // }
}; };
template <typename... Ts> struct Variant { template<typename... Ts> struct Variant {
Variant(const Variant &variant) { Variant(const Variant &variant) {
// std::cout << " 赋值构造函数 " << std::endl; // std::cout << " 赋值构造函数 " << std::endl;
type_id = variant.type_id; type_id = variant.type_id;
...@@ -70,13 +70,13 @@ namespace paddle_mobile { ...@@ -70,13 +70,13 @@ namespace paddle_mobile {
// helper::Destroy(type_id, &data); // helper::Destroy(type_id, &data);
} }
template <typename T, typename... Args> void Set(Args &&... args) { template<typename T, typename... Args> void Set(Args &&... args) {
helper::Destroy(type_id, &data); helper::Destroy(type_id, &data);
new (&data) T(std::forward<Args>(args)...); new(&data) T(std::forward<Args>(args)...);
type_id = typeid(T).hash_code(); type_id = typeid(T).hash_code();
} }
template <typename T> T &Get() const { template<typename T> T &Get() const {
if (type_id == typeid(T).hash_code()) { if (type_id == typeid(T).hash_code()) {
return *const_cast<T *>(reinterpret_cast<const T *>(&data)); return *const_cast<T *>(reinterpret_cast<const T *>(&data));
} else { } else {
...@@ -87,13 +87,13 @@ namespace paddle_mobile { ...@@ -87,13 +87,13 @@ namespace paddle_mobile {
size_t TypeId() const { return type_id; } size_t TypeId() const { return type_id; }
private: private:
static inline size_t invalid_type() { return typeid(void).hash_code(); } static inline size_t invalid_type() { return typeid(void).hash_code(); }
typedef VariantHelper<Ts...> helper; typedef VariantHelper<Ts...> helper;
size_t type_id; size_t type_id;
RawData<helper::size> data; RawData<helper::size> data;
}; };
template <typename T> struct Vistor { typedef T type_t; }; template<typename T> struct Vistor { typedef T type_t; };
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -16,8 +16,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ...@@ -16,8 +16,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
==============================================================================*/ ==============================================================================*/
#include "attribute.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace framework {} namespace framework {}
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -22,12 +22,12 @@ SOFTWARE. ...@@ -22,12 +22,12 @@ SOFTWARE.
#include "framework.pb.h" #include "framework.pb.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
class BlockDesc; class BlockDesc;
class Attribute { class Attribute {
public: public:
static Attribute static Attribute
GetAttrValue(const proto::OpDesc::Attr &attr_desc) { GetAttrValue(const proto::OpDesc::Attr &attr_desc) {
// std::cout << "begin get attr value" << std::endl; // std::cout << "begin get attr value" << std::endl;
...@@ -94,38 +94,38 @@ namespace paddle_mobile { ...@@ -94,38 +94,38 @@ namespace paddle_mobile {
} }
Attribute() {} Attribute() {}
template <typename T, typename... Args> template<typename T, typename... Args>
Attribute &Set(Args &&... args) { Attribute &Set(Args &&... args) {
variant_.Set<T>(args...); variant_.Set<T>(args...);
return *this; return *this;
} }
template <typename T> T &Get() const { return variant_.Get<T>(); } template<typename T> T &Get() const { return variant_.Get<T>(); }
private: private:
Variant<int, float, std::string, std::vector<int>, Variant<int, float, std::string, std::vector<int>,
std::vector<float>, std::vector<std::string>, bool, std::vector<float>, std::vector<std::string>, bool,
std::vector<bool>, BlockDesc *, int64_t> std::vector<bool>, BlockDesc *, int64_t>
variant_; variant_;
}; };
using AttributeMap = std::unordered_map<std::string, Attribute>; using AttributeMap = std::unordered_map<std::string, Attribute>;
class AttrReader { class AttrReader {
public: public:
explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {} explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}
template <typename T> inline T Get(const std::string &name) const { template<typename T> inline T Get(const std::string &name) const {
// PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should // PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should
// be in // be in
// AttributeMap", // AttributeMap",
// name); // name);
return ((Attribute)attrs_.at(name)).Get<T>(); return ((Attribute) attrs_.at(name)).Get<T>();
} }
private: private:
const AttributeMap &attrs_; const AttributeMap &attrs_;
}; };
} // namespace framework } // namespace framework
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -24,10 +24,10 @@ SOFTWARE. ...@@ -24,10 +24,10 @@ SOFTWARE.
#include "var_desc.h" #include "var_desc.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
class BlockDesc : PaddleMobileObject { class BlockDesc : PaddleMobileObject {
public: public:
BlockDesc(const proto::BlockDesc &desc); BlockDesc(const proto::BlockDesc &desc);
const int &ID() const { return desc_.idx(); } const int &ID() const { return desc_.idx(); }
...@@ -49,18 +49,18 @@ namespace paddle_mobile { ...@@ -49,18 +49,18 @@ namespace paddle_mobile {
std::vector<std::shared_ptr<VarDesc>> Vars() const; std::vector<std::shared_ptr<VarDesc>> Vars() const;
std::vector<std::shared_ptr<OpDesc>> Ops() const; std::vector<std::shared_ptr<OpDesc>> Ops() const;
private: private:
proto::BlockDesc desc_; proto::BlockDesc desc_;
std::vector<std::shared_ptr<OpDesc>> ops_; std::vector<std::shared_ptr<OpDesc>> ops_;
std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_; std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
}; };
} // namespace framework } // namespace framework
} // namespace paddle_mobile } // namespace paddle_mobile
namespace std { namespace std {
template <> struct hash<paddle_mobile::framework::BlockDesc> { template<> struct hash<paddle_mobile::framework::BlockDesc> {
typedef paddle_mobile::framework::BlockDesc argument_type; typedef paddle_mobile::framework::BlockDesc argument_type;
typedef std::size_t result_type; typedef std::size_t result_type;
result_type operator()(argument_type const &s) const noexcept { result_type operator()(argument_type const &s) const noexcept {
...@@ -68,6 +68,6 @@ namespace std { ...@@ -68,6 +68,6 @@ namespace std {
result_type const h2(std::hash<int>{}(s.ID())); result_type const h2(std::hash<int>{}(s.ID()));
return h1 ^ (h2 << 1); return h1 ^ (h2 << 1);
} }
}; };
} // namespace std } // namespace std
...@@ -19,15 +19,15 @@ limitations under the License. */ ...@@ -19,15 +19,15 @@ limitations under the License. */
#include <string> #include <string>
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
enum class DataLayout { enum class DataLayout {
kNHWC = 0, kNHWC = 0,
kNCHW = 1, kNCHW = 1,
kAnyLayout = 2, kAnyLayout = 2,
}; };
inline DataLayout StringToDataLayout(const std::string &str) { inline DataLayout StringToDataLayout(const std::string &str) {
std::string s(str); std::string s(str);
for (size_t i = 0; i < s.size(); ++i) { for (size_t i = 0; i < s.size(); ++i) {
s[i] = toupper(s[i]); s[i] = toupper(s[i]);
...@@ -42,27 +42,23 @@ namespace paddle_mobile { ...@@ -42,27 +42,23 @@ namespace paddle_mobile {
} else { } else {
// std::cout << "Unknown storage order string: %s", s; // std::cout << "Unknown storage order string: %s", s;
} }
} }
inline std::string DataLayoutToString(const DataLayout &data_layout) { inline std::string DataLayoutToString(const DataLayout &data_layout) {
switch (data_layout) { switch (data_layout) {
case DataLayout::kNHWC: case DataLayout::kNHWC:return "NHWC";
return "NHWC"; case DataLayout::kNCHW:return "NCHW";
case DataLayout::kNCHW: case DataLayout::kAnyLayout:return "ANY_LAYOUT";
return "NCHW"; default:break;
case DataLayout::kAnyLayout:
return "ANY_LAYOUT";
default:
break;
// std::cout << "unknown DataLayou %d", data_layout; // std::cout << "unknown DataLayou %d", data_layout;
} }
} }
inline std::ostream &operator<<(std::ostream &out, inline std::ostream &operator<<(std::ostream &out,
const DataLayout &l) { const DataLayout &l) {
out << DataLayoutToString(l); out << DataLayoutToString(l);
return out; return out;
} }
} // namespace framework } // namespace framework
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -21,14 +21,14 @@ SOFTWARE. ...@@ -21,14 +21,14 @@ SOFTWARE.
#include "data_transform.h" #include "data_transform.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
static void PassTensorData(Tensor *from, Tensor *to) { static void PassTensorData(Tensor *from, Tensor *to) {
to->ShareDataWith(*from); to->ShareDataWith(*from);
*from = Tensor(); *from = Tensor();
} }
void DataTransform(const OpKernelType &expected_kernel_type, void DataTransform(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var, const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor, Tensor *output_tensor) { const Tensor &input_tensor, Tensor *output_tensor) {
bool transformed = false; bool transformed = false;
...@@ -66,9 +66,9 @@ namespace paddle_mobile { ...@@ -66,9 +66,9 @@ namespace paddle_mobile {
// check!"); // check!");
// get output data // get output data
output_tensor->ShareDataWith(in); output_tensor->ShareDataWith(in);
} }
void CopyVariableWithTensor(const Variable &in_var, void CopyVariableWithTensor(const Variable &in_var,
const Tensor &tensor, Variable &out_var) { const Tensor &tensor, Variable &out_var) {
// if (in_var.IsType<LoDTensor>()) { // if (in_var.IsType<LoDTensor>()) {
// auto& in_lod_tensor = in_var.Get<LoDTensor>(); // auto& in_lod_tensor = in_var.Get<LoDTensor>();
...@@ -86,7 +86,7 @@ namespace paddle_mobile { ...@@ -86,7 +86,7 @@ namespace paddle_mobile {
// } else { // } else {
// PADDLE_THROW("unknown var type"); // PADDLE_THROW("unknown var type");
// } // }
} }
} // namespace framework } // namespace framework
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -19,20 +19,19 @@ SOFTWARE. ...@@ -19,20 +19,19 @@ SOFTWARE.
#include "conv_op.h" #include "conv_op.h"
#include "framework/data_type.h" #include "framework/data_type.h"
#include "framework/op_proto_maker.h" #include "framework/op_proto_maker.h"
#include "framework/operator.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
int ConvOutputSize(int input_size, int filter_size, int dilation, int ConvOutputSize(int input_size, int filter_size, int dilation,
int padding, int stride) { int padding, int stride) {
const int dkernel = dilation * (filter_size - 1) + 1; const int dkernel = dilation * (filter_size - 1) + 1;
int output_size = (input_size + 2 * padding - dkernel) / stride + 1; int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
return output_size; return output_size;
} }
template <typename Dtype, typename T> template<typename Dtype, typename T>
void ConvOp<Dtype, T>::InferShape() const { void ConvOp<Dtype, T>::InferShape() const {
// std::cout << " begin get dims: " << std::endl; // std::cout << " begin get dims: " << std::endl;
auto in_dims = param_.Input()->dims(); auto in_dims = param_.Input()->dims();
...@@ -68,9 +67,9 @@ namespace paddle_mobile { ...@@ -68,9 +67,9 @@ namespace paddle_mobile {
framework::DDim ddim = framework::make_ddim(output_shape); framework::DDim ddim = framework::make_ddim(output_shape);
param_.Output()->Resize(ddim); param_.Output()->Resize(ddim);
} }
template class ConvOp<CPU, float>; template class ConvOp<CPU, float>;
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -22,13 +22,13 @@ SOFTWARE. ...@@ -22,13 +22,13 @@ SOFTWARE.
#include "operators/kernel/conv_kernel.h" #include "operators/kernel/conv_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T> template<typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> { class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
ConvOp(const std::string &type, const VariableNameMap &inputs, ConvOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs, const framework::AttributeMap &attrs,
...@@ -46,9 +46,9 @@ namespace paddle_mobile { ...@@ -46,9 +46,9 @@ namespace paddle_mobile {
this->ClearVariables(); this->ClearVariables();
} }
private: private:
ConvParam param_; ConvParam param_;
}; };
} // operators } // operators
} // paddle_mobile } // paddle_mobile
...@@ -19,13 +19,13 @@ SOFTWARE. ...@@ -19,13 +19,13 @@ SOFTWARE.
#include "elementwise_add_op.h" #include "elementwise_add_op.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename Dtype, typename T> template<typename Dtype, typename T>
void ElementwiseAddOp<Dtype, T>::InferShape() const { void ElementwiseAddOp<Dtype, T>::InferShape() const {
auto x_dim = param_.InputX()->dims(); auto x_dim = param_.InputX()->dims();
param_.Out()->Resize(x_dim); param_.Out()->Resize(x_dim);
} }
template class ElementwiseAddOp<CPU, float>; template class ElementwiseAddOp<CPU, float>;
} }
} }
...@@ -21,14 +21,14 @@ SOFTWARE. ...@@ -21,14 +21,14 @@ SOFTWARE.
#include "op_param.h" #include "op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T> template<typename DeviceType, typename T>
class ElementwiseAddOp class ElementwiseAddOp
: public framework::OperatorWithKernel<DeviceType> { : public framework::OperatorWithKernel<DeviceType> {
public: public:
ElementwiseAddOp(const std::string &type, ElementwiseAddOp(const std::string &type,
const VariableNameMap &inputs, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
...@@ -48,8 +48,8 @@ namespace paddle_mobile { ...@@ -48,8 +48,8 @@ namespace paddle_mobile {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel; using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override; void InferShape() const override;
protected: protected:
ElementwiseAddParam param_; ElementwiseAddParam param_;
}; };
} }
} }
...@@ -19,9 +19,9 @@ SOFTWARE. ...@@ -19,9 +19,9 @@ SOFTWARE.
#include "operators/kernel/conv_kernel.h" #include "operators/kernel/conv_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
bool IsExpand(const std::vector<int64_t> &filter_dim, bool IsExpand(const std::vector<int64_t> &filter_dim,
const std::vector<int> &strides, const std::vector<int> &strides,
const std::vector<int> &paddings, const std::vector<int> &paddings,
const std::vector<int> &dilations) { const std::vector<int> &dilations) {
...@@ -35,10 +35,10 @@ namespace paddle_mobile { ...@@ -35,10 +35,10 @@ namespace paddle_mobile {
dilation_1 = dilation_1 && (dilations[j] == 1); dilation_1 = dilation_1 && (dilations[j] == 1);
} }
return !(filter_1 && strides_1 && padding_0 && dilation_1); return !(filter_1 && strides_1 && padding_0 && dilation_1);
} }
template <> template<>
void ConvKernel<CPU, float, ConvParam>::Compute( void ConvKernel<CPU, float, ConvParam>::Compute(
const ConvParam &param) const { const ConvParam &param) const {
LOG(kLOG_DEBUG) << param; LOG(kLOG_DEBUG) << param;
...@@ -156,9 +156,9 @@ namespace paddle_mobile { ...@@ -156,9 +156,9 @@ namespace paddle_mobile {
float(1.0), &out_slice, float(0.0)); float(1.0), &out_slice, float(0.0));
} }
} }
} }
template class ConvKernel<CPU, float, ConvParam>; template class ConvKernel<CPU, float, ConvParam>;
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -17,14 +17,14 @@ limitations under the License. */ ...@@ -17,14 +17,14 @@ limitations under the License. */
#include "operators/kernel/elementwise_add_kernel.h" #include "operators/kernel/elementwise_add_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename T> struct AddFunctor { template<typename T> struct AddFunctor {
inline T operator()(T a, T b) const { return a + b; } inline T operator()(T a, T b) const { return a + b; }
}; };
template <> template<>
void ElementwiseAddKernel<CPU, float, ElementwiseAddParam>::Compute( void ElementwiseAddKernel<CPU, float, ElementwiseAddParam>::Compute(
const ElementwiseAddParam &param) const { const ElementwiseAddParam &param) const {
const Tensor *input_x = param.InputX(); const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY(); const Tensor *input_y = param.InputY();
...@@ -33,9 +33,9 @@ namespace paddle_mobile { ...@@ -33,9 +33,9 @@ namespace paddle_mobile {
const int axis = param.Axis(); const int axis = param.Axis();
ElementwiseComputeEx<AddFunctor<float>, float>( ElementwiseComputeEx<AddFunctor<float>, float>(
input_x, input_y, axis, AddFunctor<float>(), Out); input_x, input_y, axis, AddFunctor<float>(), Out);
} }
template class ElementwiseAddKernel<CPU, float, ElementwiseAddParam>; template class ElementwiseAddKernel<CPU, float, ElementwiseAddParam>;
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -21,11 +21,11 @@ SOFTWARE. ...@@ -21,11 +21,11 @@ SOFTWARE.
#include "operators/kernel/mul_kernel.h" #include "operators/kernel/mul_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <> template<>
void void
MulKernel<CPU, float, MulParam>::Compute(const MulParam &param) const { MulKernel<CPU, float, MulParam>::Compute(const MulParam &param) const {
const Tensor *input_x = param.InputX(); const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY(); const Tensor *input_y = param.InputY();
Tensor *out = param.Out(); Tensor *out = param.Out();
...@@ -48,9 +48,9 @@ namespace paddle_mobile { ...@@ -48,9 +48,9 @@ namespace paddle_mobile {
if (out_dim.size() != 2) { if (out_dim.size() != 2) {
out->Resize(out_dim); out->Resize(out_dim);
} }
} }
template class MulKernel<CPU, float, MulParam>; template class MulKernel<CPU, float, MulParam>;
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -25,15 +25,15 @@ SOFTWARE. ...@@ -25,15 +25,15 @@ SOFTWARE.
#pragma once; #pragma once;
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T, typename P> template<typename DeviceType, typename T, typename P>
class ConvKernel class ConvKernel
: public framework::OpKernelBase<DeviceType, ConvParam> { : public framework::OpKernelBase<DeviceType, ConvParam> {
public: public:
void Compute(const ConvParam &param) const; void Compute(const ConvParam &param) const;
}; };
} }
} }
...@@ -22,15 +22,15 @@ SOFTWARE. ...@@ -22,15 +22,15 @@ SOFTWARE.
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T, typename P> template<typename DeviceType, typename T, typename P>
class ElementwiseAddKernel class ElementwiseAddKernel
: public framework::OpKernelBase<DeviceType, ElementwiseAddParam> { : public framework::OpKernelBase<DeviceType, ElementwiseAddParam> {
public: public:
void Compute(const ElementwiseAddParam &param) const; void Compute(const ElementwiseAddParam &param) const;
}; };
} }
} }
...@@ -16,15 +16,13 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ...@@ -16,15 +16,13 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
==============================================================================*/ ==============================================================================*/
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
// template<> // template<>
// void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const // void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const
// {} // {}
// //
// template class ConvKernel<FPGA, float>; // template class ConvKernel<FPGA, float>;
} }
} }
...@@ -22,14 +22,14 @@ SOFTWARE. ...@@ -22,14 +22,14 @@ SOFTWARE.
#pragma once; #pragma once;
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T, typename P> template<typename DeviceType, typename T, typename P>
class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> { class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> {
public: public:
void Compute(const MulParam &param) const; void Compute(const MulParam &param) const;
}; };
} }
} }
...@@ -18,9 +18,9 @@ limitations under the License. */ ...@@ -18,9 +18,9 @@ limitations under the License. */
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0) #define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
/* /*
* Out = X ⊙ Y * Out = X ⊙ Y
* If Y's shape does not match X' shape, they will be reshaped. * If Y's shape does not match X' shape, they will be reshaped.
* For example: * For example:
...@@ -31,7 +31,7 @@ namespace paddle_mobile { ...@@ -31,7 +31,7 @@ namespace paddle_mobile {
* pre=2*3, n=4*5, post=1 * pre=2*3, n=4*5, post=1
* x.shape(6, 20, 1) * y.shape(1, 20, 1).broadcast(6, 20, 1) * x.shape(6, 20, 1) * y.shape(1, 20, 1).broadcast(6, 20, 1)
*/ */
inline void get_mid_dims(const framework::DDim &x_dims, inline void get_mid_dims(const framework::DDim &x_dims,
const framework::DDim &y_dims, const int axis, const framework::DDim &y_dims, const int axis,
int *pre, int *n, int *post) { int *pre, int *n, int *post) {
*pre = 1; *pre = 1;
...@@ -51,10 +51,10 @@ namespace paddle_mobile { ...@@ -51,10 +51,10 @@ namespace paddle_mobile {
for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) { for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
(*post) *= x_dims[i]; (*post) *= x_dims[i];
} }
} }
/// remove dims tail 1. (4,20,1,1) -> (4,20) /// remove dims tail 1. (4,20,1,1) -> (4,20)
inline void trim_trailing_singular_dims(framework::DDim *dims) { inline void trim_trailing_singular_dims(framework::DDim *dims) {
// Remove trailing dimensions of size 1 for y // Remove trailing dimensions of size 1 for y
auto actual_dims_size = dims->size(); auto actual_dims_size = dims->size();
for (; actual_dims_size != 0; --actual_dims_size) { for (; actual_dims_size != 0; --actual_dims_size) {
...@@ -66,10 +66,10 @@ namespace paddle_mobile { ...@@ -66,10 +66,10 @@ namespace paddle_mobile {
actual_dims.resize(actual_dims_size); actual_dims.resize(actual_dims_size);
*dims = framework::make_ddim(actual_dims); *dims = framework::make_ddim(actual_dims);
} }
} }
template <typename T> class RowwiseTransformIterator { template<typename T> class RowwiseTransformIterator {
public: public:
RowwiseTransformIterator(const T *ptr, int n) RowwiseTransformIterator(const T *ptr, int n)
: ptr_(ptr), i_(0), n_(n) {} : ptr_(ptr), i_(0), n_(n) {}
...@@ -91,18 +91,18 @@ namespace paddle_mobile { ...@@ -91,18 +91,18 @@ namespace paddle_mobile {
const T &operator*() { return ptr_[i_]; } const T &operator*() { return ptr_[i_]; }
private: private:
const T *ptr_; const T *ptr_;
int i_; int i_;
int64_t n_; int64_t n_;
}; };
/// (4,20,2)+(20,): (20,) just as (20,1), when move 2 strides in last /// (4,20,2)+(20,): (20,) just as (20,1), when move 2 strides in last
/// dimension /// dimension
/// in (4,20,2) is 2 , /// in (4,20,2) is 2 ,
/// (20,1) move 1 stride , to fill(add) 2 element with the same number. /// (20,1) move 1 stride , to fill(add) 2 element with the same number.
template <typename T> class MidWiseTransformIterator { template<typename T> class MidWiseTransformIterator {
public: public:
MidWiseTransformIterator(const T *ptr, int n, int post) MidWiseTransformIterator(const T *ptr, int n, int post)
: ptr_(ptr), i_(0), j_(0), n_(n), post_(post) {} : ptr_(ptr), i_(0), j_(0), n_(n), post_(post) {}
...@@ -128,17 +128,17 @@ namespace paddle_mobile { ...@@ -128,17 +128,17 @@ namespace paddle_mobile {
const T &operator*() { return ptr_[i_]; } const T &operator*() { return ptr_[i_]; }
private: private:
const T *ptr_; const T *ptr_;
int64_t i_; int64_t i_;
int64_t j_; int64_t j_;
int64_t n_; int64_t n_;
int64_t post_; int64_t post_;
}; };
template <typename Functor, typename T, typename OutType = T> template<typename Functor, typename T, typename OutType = T>
class TransformFunctor { class TransformFunctor {
public: public:
TransformFunctor(const framework::Tensor *x, TransformFunctor(const framework::Tensor *x,
const framework::Tensor *y, framework::Tensor *z, const framework::Tensor *y, framework::Tensor *z,
Functor func) Functor func)
...@@ -164,16 +164,16 @@ namespace paddle_mobile { ...@@ -164,16 +164,16 @@ namespace paddle_mobile {
z_, func_); z_, func_);
} }
private: private:
const T *x_; const T *x_;
const T *y_; const T *y_;
OutType *z_; OutType *z_;
int64_t nx_; int64_t nx_;
Functor func_; Functor func_;
}; };
template <typename Functor, typename T, typename OutType = T> template<typename Functor, typename T, typename OutType = T>
void ElementwiseComputeEx(const framework::Tensor *x, void ElementwiseComputeEx(const framework::Tensor *x,
const framework::Tensor *y, int axis, const framework::Tensor *y, int axis,
Functor func, framework::Tensor *z) { Functor func, framework::Tensor *z) {
TransformFunctor<Functor, T, OutType> functor(x, y, z, func); TransformFunctor<Functor, T, OutType> functor(x, y, z, func);
...@@ -205,7 +205,7 @@ namespace paddle_mobile { ...@@ -205,7 +205,7 @@ namespace paddle_mobile {
functor.RunMidWise(n, pre, post); functor.RunMidWise(n, pre, post);
return; return;
} }
} }
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -16,17 +16,17 @@ limitations under the License. */ ...@@ -16,17 +16,17 @@ limitations under the License. */
#include "common/types.h" #include "common/types.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
/* /*
* im = [input_channels, input_height, input_width] * im = [input_channels, input_height, input_width]
* col = * col =
* [input_channels, filter_height, filter_width, output_height, * [input_channels, filter_height, filter_width, output_height,
* output_width] * output_width]
*/ */
template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> { template<class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public: public:
void operator()(const framework::Tensor &im, void operator()(const framework::Tensor &im,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
...@@ -95,16 +95,16 @@ namespace paddle_mobile { ...@@ -95,16 +95,16 @@ namespace paddle_mobile {
} }
} }
} }
}; };
/* /*
* im = [input_channels, input_height, input_width] * im = [input_channels, input_height, input_width]
* col = * col =
* [input_channels, filter_height, filter_width, output_height, * [input_channels, filter_height, filter_width, output_height,
* output_width] * output_width]
*/ */
template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> { template<class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public: public:
void operator()(const framework::Tensor &col, void operator()(const framework::Tensor &col,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
...@@ -172,21 +172,21 @@ namespace paddle_mobile { ...@@ -172,21 +172,21 @@ namespace paddle_mobile {
} }
} }
} }
}; };
template class Im2ColFunctor<ColFormat::kCFO, CPU, float>; template class Im2ColFunctor<ColFormat::kCFO, CPU, float>;
template class Im2ColFunctor<ColFormat::kCFO, CPU, double>; template class Im2ColFunctor<ColFormat::kCFO, CPU, double>;
template class Col2ImFunctor<ColFormat::kCFO, CPU, float>; template class Col2ImFunctor<ColFormat::kCFO, CPU, float>;
template class Col2ImFunctor<ColFormat::kCFO, CPU, double>; template class Col2ImFunctor<ColFormat::kCFO, CPU, double>;
/* /*
* im = [input_channels, input_height, input_width] * im = [input_channels, input_height, input_width]
* col = * col =
* [output_height, output_width, input_channels, filter_height, * [output_height, output_width, input_channels, filter_height,
* filter_width] * filter_width]
*/ */
template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> { template<class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public: public:
void operator()(const framework::Tensor &im, void operator()(const framework::Tensor &im,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
...@@ -238,7 +238,7 @@ namespace paddle_mobile { ...@@ -238,7 +238,7 @@ namespace paddle_mobile {
filter_col_idx - padding[1]; filter_col_idx - padding[1];
int col_offset = int col_offset =
((((col_row_idx)*col_width + ((((col_row_idx) * col_width +
col_col_idx) * col_col_idx) *
im_channels + im_channels +
channel) * channel) *
...@@ -264,16 +264,16 @@ namespace paddle_mobile { ...@@ -264,16 +264,16 @@ namespace paddle_mobile {
} }
} }
} }
}; };
/* /*
* im = [input_channels, input_height, input_width] * im = [input_channels, input_height, input_width]
* col = * col =
* [output_height, output_width, input_channels, filter_height, * [output_height, output_width, input_channels, filter_height,
* filter_width] * filter_width]
*/ */
template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> { template<class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public: public:
void operator()(const framework::Tensor &col, void operator()(const framework::Tensor &col,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
...@@ -352,13 +352,13 @@ namespace paddle_mobile { ...@@ -352,13 +352,13 @@ namespace paddle_mobile {
} }
} }
} }
}; };
template class Im2ColFunctor<ColFormat::kOCF, CPU, float>; template class Im2ColFunctor<ColFormat::kOCF, CPU, float>;
template class Im2ColFunctor<ColFormat::kOCF, CPU, double>; template class Im2ColFunctor<ColFormat::kOCF, CPU, double>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, float>; template class Col2ImFunctor<ColFormat::kOCF, CPU, float>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, double>; template class Col2ImFunctor<ColFormat::kOCF, CPU, double>;
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -17,14 +17,14 @@ limitations under the License. */ ...@@ -17,14 +17,14 @@ limitations under the License. */
#include "framework/tensor.h" #include "framework/tensor.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
/* The storage format of the coldata in the Im2ColFunctor and /* The storage format of the coldata in the Im2ColFunctor and
* Col2ImFunctor. */ * Col2ImFunctor. */
enum class ColFormat { kCFO = 0, kOCF = 1 }; enum class ColFormat { kCFO = 0, kOCF = 1 };
/* /*
* \brief Converts the image data of three dimensions(CHW) into a * \brief Converts the image data of three dimensions(CHW) into a
* colData of * colData of
* five dimensions in the Im2ColFunctor calculation, * five dimensions in the Im2ColFunctor calculation,
...@@ -87,26 +87,26 @@ namespace paddle_mobile { ...@@ -87,26 +87,26 @@ namespace paddle_mobile {
* equal to * equal to
* colShape.inputChannels. * colShape.inputChannels.
*/ */
template <ColFormat Format, typename DeviceType, typename T> template<ColFormat Format, typename DeviceType, typename T>
class Im2ColFunctor { class Im2ColFunctor {
public: public:
void operator()(const framework::Tensor &im, void operator()(const framework::Tensor &im,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
const std::vector<int> &padding, const std::vector<int> &padding,
framework::Tensor *col); framework::Tensor *col);
}; };
template <ColFormat Format, typename DeviceType, typename T> template<ColFormat Format, typename DeviceType, typename T>
class Col2ImFunctor { class Col2ImFunctor {
public: public:
void operator()(const framework::Tensor &col, void operator()(const framework::Tensor &col,
const std::vector<int> &dilation, const std::vector<int> &dilation,
const std::vector<int> &stride, const std::vector<int> &stride,
const std::vector<int> &padding, const std::vector<int> &padding,
framework::Tensor *im); framework::Tensor *im);
}; };
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -15,11 +15,11 @@ limitations under the License. */ ...@@ -15,11 +15,11 @@ limitations under the License. */
#include "math_function.h" #include "math_function.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
template <> template<>
void gemm<float>(const CBLAS_TRANSPOSE transA, void gemm<float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K, const float alpha, const int N, const int K, const float alpha,
const float *A, const float *B, const float beta, const float *A, const float *B, const float beta,
...@@ -29,10 +29,10 @@ namespace paddle_mobile { ...@@ -29,10 +29,10 @@ namespace paddle_mobile {
int ldc = N; int ldc = N;
cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A,
lda, B, ldb, beta, C, ldc); lda, B, ldb, beta, C, ldc);
} }
template <> template<>
void gemm<double>(const CBLAS_TRANSPOSE transA, void gemm<double>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K, const double alpha, const int N, const int K, const double alpha,
const double *A, const double *B, const double *A, const double *B,
...@@ -42,10 +42,10 @@ namespace paddle_mobile { ...@@ -42,10 +42,10 @@ namespace paddle_mobile {
int ldc = N; int ldc = N;
cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A,
lda, B, ldb, beta, C, ldc); lda, B, ldb, beta, C, ldc);
} }
template <> template<>
void gemm<float>(const bool transA, const bool transB, const int M, void gemm<float>(const bool transA, const bool transB, const int M,
const int N, const int K, const float alpha, const int N, const int K, const float alpha,
const float *A, const int lda, const float *B, const float *A, const int lda, const float *B,
const int ldb, const float beta, float *C, const int ldb, const float beta, float *C,
...@@ -54,10 +54,10 @@ namespace paddle_mobile { ...@@ -54,10 +54,10 @@ namespace paddle_mobile {
transA == false ? CblasNoTrans : CblasTrans, transA == false ? CblasNoTrans : CblasTrans,
transB == false ? CblasNoTrans : CblasTrans, M, N, transB == false ? CblasNoTrans : CblasTrans, M, N,
K, alpha, A, lda, B, ldb, beta, C, ldc); K, alpha, A, lda, B, ldb, beta, C, ldc);
} }
template <> template<>
void gemm<double>(const bool transA, const bool transB, const int M, void gemm<double>(const bool transA, const bool transB, const int M,
const int N, const int K, const double alpha, const int N, const int K, const double alpha,
const double *A, const int lda, const double *B, const double *A, const int lda, const double *B,
const int ldb, const double beta, double *C, const int ldb, const double beta, double *C,
...@@ -66,10 +66,10 @@ namespace paddle_mobile { ...@@ -66,10 +66,10 @@ namespace paddle_mobile {
transA == false ? CblasNoTrans : CblasTrans, transA == false ? CblasNoTrans : CblasTrans,
transB == false ? CblasNoTrans : CblasTrans, M, N, transB == false ? CblasNoTrans : CblasTrans, M, N,
K, alpha, A, lda, B, ldb, beta, C, ldc); K, alpha, A, lda, B, ldb, beta, C, ldc);
} }
template <> template<>
void matmul<float>(const framework::Tensor &matrix_a, bool trans_a, void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, const framework::Tensor &matrix_b, bool trans_b,
float alpha, framework::Tensor *matrix_out, float alpha, framework::Tensor *matrix_out,
float beta) { float beta) {
...@@ -99,10 +99,10 @@ namespace paddle_mobile { ...@@ -99,10 +99,10 @@ namespace paddle_mobile {
gemm<float>(transA, transB, M, N, K, alpha, gemm<float>(transA, transB, M, N, K, alpha,
matrix_a.data<float>(), matrix_b.data<float>(), matrix_a.data<float>(), matrix_b.data<float>(),
beta, matrix_out->data<float>()); beta, matrix_out->data<float>());
} }
template <> template<>
void matmul<double>(const framework::Tensor &matrix_a, bool trans_a, void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, const framework::Tensor &matrix_b, bool trans_b,
double alpha, framework::Tensor *matrix_out, double alpha, framework::Tensor *matrix_out,
double beta) { double beta) {
...@@ -132,8 +132,8 @@ namespace paddle_mobile { ...@@ -132,8 +132,8 @@ namespace paddle_mobile {
gemm<double>(transA, transB, M, N, K, alpha, gemm<double>(transA, transB, M, N, K, alpha,
matrix_a.data<double>(), matrix_b.data<double>(), matrix_a.data<double>(), matrix_b.data<double>(),
beta, matrix_out->data<double>()); beta, matrix_out->data<double>());
} }
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -19,26 +19,26 @@ limitations under the License. */ ...@@ -19,26 +19,26 @@ limitations under the License. */
#include <cmath> #include <cmath>
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
template <typename T> template<typename T>
void gemm(const CBLAS_TRANSPOSE transA, void gemm(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const CBLAS_TRANSPOSE transB, const int M, const int N,
const int K, const T alpha, const T *A, const T *B, const int K, const T alpha, const T *A, const T *B,
const T beta, T *C); const T beta, T *C);
template <typename T> template<typename T>
void gemm(const bool transA, const bool transB, const int M, void gemm(const bool transA, const bool transB, const int M,
const int N, const int K, const T alpha, const T *A, const int N, const int K, const T alpha, const T *A,
const int lda, const T *B, const int ldb, const T beta, const int lda, const T *B, const int ldb, const T beta,
T *C, const int ldc); T *C, const int ldc);
// matrix multiply with continuous memory // matrix multiply with continuous memory
template <typename T> template<typename T>
void matmul(const framework::Tensor &matrix_a, bool trans_a, void matmul(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, const framework::Tensor &matrix_b, bool trans_b,
T alpha, framework::Tensor *matrix_out, T beta); T alpha, framework::Tensor *matrix_out, T beta);
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -17,41 +17,41 @@ limitations under the License. */ ...@@ -17,41 +17,41 @@ limitations under the License. */
#include <algorithm> #include <algorithm>
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
// Transform applys a unary or a binary functor on each element in a // Transform applys a unary or a binary functor on each element in a
// range defined by a pair of iterators. // range defined by a pair of iterators.
// //
// - The specialization for CPU calls std::transform. // - The specialization for CPU calls std::transform.
// - The specialization for CUDA calls thrust::tranform. // - The specialization for CUDA calls thrust::tranform.
// //
// NOTE: We need to define InputIter and OutputIter defined as // NOTE: We need to define InputIter and OutputIter defined as
// different types, because the InputIter points op's inputs // different types, because the InputIter points op's inputs
// and // and
// OutputIter pints to op's outputs. // OutputIter pints to op's outputs.
// //
// NOTE: We don't assume that InputIter to be const InputType* and // NOTE: We don't assume that InputIter to be const InputType* and
// OutputIter to be OutputType*, because we might use a // OutputIter to be OutputType*, because we might use a
// iterator // iterator
// class, paddle::fluid::operators::RowwiseTRansformIterator. // class, paddle::fluid::operators::RowwiseTRansformIterator.
struct Transform { struct Transform {
template <typename InputIter, typename OutputIter, template<typename InputIter, typename OutputIter,
typename UnaryOperation> typename UnaryOperation>
void operator()(InputIter first, InputIter last, void operator()(InputIter first, InputIter last,
OutputIter result, UnaryOperation op) { OutputIter result, UnaryOperation op) {
std::transform(first, last, result, op); std::transform(first, last, result, op);
} }
template <typename InputIter1, typename InputIter2, template<typename InputIter1, typename InputIter2,
typename OutputIter, typename BinaryOperation> typename OutputIter, typename BinaryOperation>
void operator()(InputIter1 first1, InputIter1 last1, void operator()(InputIter1 first1, InputIter1 last1,
InputIter2 first2, OutputIter result, InputIter2 first2, OutputIter result,
BinaryOperation op) { BinaryOperation op) {
std::transform(first1, last1, first2, result, op); std::transform(first1, last1, first2, result, op);
} }
}; };
} }
} // namespace platform } // namespace platform
} // namespace paddle } // namespace paddle
...@@ -15,18 +15,18 @@ limitations under the License. */ ...@@ -15,18 +15,18 @@ limitations under the License. */
#include "vol2col.h" #include "vol2col.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
using Tensor = paddle_mobile::framework::Tensor; using Tensor = paddle_mobile::framework::Tensor;
/* /*
* vol = [input_channels, input_depth, input_height, input_width] * vol = [input_channels, input_depth, input_height, input_width]
* col = * col =
* [input_channels, filter_depth, filter_height, filter_width, * [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width] * output_depth, output_height, output_width]
*/ */
template <typename T> class Vol2ColFunctor<CPU, T> { template<typename T> class Vol2ColFunctor<CPU, T> {
public: public:
void operator()(const Tensor &vol, void operator()(const Tensor &vol,
const std::vector<int> &dilations, const std::vector<int> &dilations,
const std::vector<int> &strides, const std::vector<int> &strides,
...@@ -116,16 +116,16 @@ namespace paddle_mobile { ...@@ -116,16 +116,16 @@ namespace paddle_mobile {
} }
} }
} }
}; };
/* /*
* vol = [input_channels,input_depth, input_height, input_width] * vol = [input_channels,input_depth, input_height, input_width]
* col = * col =
* [input_channels, filter_depth, filter_height, filter_width, * [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width] * output_depth, output_height, output_width]
*/ */
template <typename T> class Col2VolFunctor<CPU, T> { template<typename T> class Col2VolFunctor<CPU, T> {
public: public:
void operator()(const Tensor &col, void operator()(const Tensor &col,
const std::vector<int> &dilations, const std::vector<int> &dilations,
const std::vector<int> &strides, const std::vector<int> &strides,
...@@ -214,13 +214,13 @@ namespace paddle_mobile { ...@@ -214,13 +214,13 @@ namespace paddle_mobile {
} }
} }
} }
}; };
template class Vol2ColFunctor<CPU, float>; template class Vol2ColFunctor<CPU, float>;
template class Vol2ColFunctor<CPU, double>; template class Vol2ColFunctor<CPU, double>;
template class Col2VolFunctor<CPU, float>; template class Col2VolFunctor<CPU, float>;
template class Col2VolFunctor<CPU, double>; template class Col2VolFunctor<CPU, double>;
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -18,9 +18,9 @@ limitations under the License. */ ...@@ -18,9 +18,9 @@ limitations under the License. */
#include "framework/tensor.h" #include "framework/tensor.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
/* /*
* \brief Converts the feature data of four dimensions(CDHW) into a * \brief Converts the feature data of four dimensions(CDHW) into a
* colData of * colData of
* seven dimensions in the Vol2ColFunctor calculation, * seven dimensions in the Vol2ColFunctor calculation,
...@@ -70,26 +70,26 @@ namespace paddle_mobile { ...@@ -70,26 +70,26 @@ namespace paddle_mobile {
* equal to * equal to
* colShape.inputChannels. * colShape.inputChannels.
*/ */
using Tensor = paddle_mobile::framework::Tensor; using Tensor = paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T> class Vol2ColFunctor { template<typename DeviceType, typename T> class Vol2ColFunctor {
public: public:
void operator()(const Tensor &vol, void operator()(const Tensor &vol,
const std::vector<int> &dilations, const std::vector<int> &dilations,
const std::vector<int> &strides, const std::vector<int> &strides,
const std::vector<int> &paddings, const std::vector<int> &paddings,
Tensor *col) const; Tensor *col) const;
}; };
template <typename DeviceType, typename T> class Col2VolFunctor { template<typename DeviceType, typename T> class Col2VolFunctor {
public: public:
void operator()(const Tensor &col, void operator()(const Tensor &col,
const std::vector<int> &dilations, const std::vector<int> &dilations,
const std::vector<int> &strides, const std::vector<int> &strides,
const std::vector<int> &paddings, const std::vector<int> &paddings,
Tensor *vol) const; Tensor *vol) const;
}; };
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -19,10 +19,10 @@ SOFTWARE. ...@@ -19,10 +19,10 @@ SOFTWARE.
#include "mul_op.h" #include "mul_op.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename Dtype, typename T> template<typename Dtype, typename T>
void MulOp<Dtype, T>::InferShape() const { void MulOp<Dtype, T>::InferShape() const {
auto x_dims = param_.InputX()->dims(); auto x_dims = param_.InputX()->dims();
auto y_dims = param_.InputY()->dims(); auto y_dims = param_.InputY()->dims();
int x_num_col_dims = param_.XNumColDims(); int x_num_col_dims = param_.XNumColDims();
...@@ -51,7 +51,7 @@ namespace paddle_mobile { ...@@ -51,7 +51,7 @@ namespace paddle_mobile {
framework::DDim ddim = framework::make_ddim(output_dims); framework::DDim ddim = framework::make_ddim(output_dims);
param_.Out()->Resize(ddim); param_.Out()->Resize(ddim);
} }
template class MulOp<CPU, float>; template class MulOp<CPU, float>;
} }
} }
...@@ -21,13 +21,13 @@ SOFTWARE. ...@@ -21,13 +21,13 @@ SOFTWARE.
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
template <typename DeviceType, typename T> template<typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> { class MulOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
MulOp(const std::string &type, const VariableNameMap &inputs, MulOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap attrs, const framework::AttributeMap attrs,
...@@ -44,9 +44,9 @@ namespace paddle_mobile { ...@@ -44,9 +44,9 @@ namespace paddle_mobile {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel; using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override; void InferShape() const override;
protected: protected:
MulParam param_; MulParam param_;
}; };
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -19,8 +19,8 @@ SOFTWARE. ...@@ -19,8 +19,8 @@ SOFTWARE.
#include "op_param.h" #include "op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
Print &operator<<(Print &printer, const ConvParam &conv_param) { Print &operator<<(Print &printer, const ConvParam &conv_param) {
printer << "parameter of conv: " printer << "parameter of conv: "
<< "\n"; << "\n";
printer << " stride: " printer << " stride: "
...@@ -40,6 +40,6 @@ namespace paddle_mobile { ...@@ -40,6 +40,6 @@ namespace paddle_mobile {
printer << " filter dims: " << conv_param.Filter()->dims() << "\n"; printer << " filter dims: " << conv_param.Filter()->dims() << "\n";
printer << " output dims: " << conv_param.Output()->dims(); printer << " output dims: " << conv_param.Output()->dims();
return printer; return printer;
} }
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -26,61 +26,61 @@ SOFTWARE. ...@@ -26,61 +26,61 @@ SOFTWARE.
#include "framework/variable.h" #include "framework/variable.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using namespace framework;
class OpParam : PaddleMobileObject { class OpParam : PaddleMobileObject {
public: public:
protected: protected:
template <typename T> template<typename T>
static T *InputFrom(const VariableNameMap &inputs, static T *InputFrom(const VariableNameMap &inputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("Input", inputs, scope); return GetVarValue<T>("Input", inputs, scope);
} }
template <typename T> template<typename T>
static T *InputXFrom(const VariableNameMap &inputs, static T *InputXFrom(const VariableNameMap &inputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("X", inputs, scope); return GetVarValue<T>("X", inputs, scope);
} }
template <typename T> template<typename T>
static T *InputYFrom(const VariableNameMap &inputs, static T *InputYFrom(const VariableNameMap &inputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("Y", inputs, scope); return GetVarValue<T>("Y", inputs, scope);
} }
template <typename T> template<typename T>
static std::vector<T *> static std::vector<T *>
InputMultiFrom(const VariableNameMap &inputs, const Scope &scope) { InputMultiFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetMultiVarValue<T>("Input", inputs, scope); return GetMultiVarValue<T>("Input", inputs, scope);
} }
template <typename T> template<typename T>
static T *OutputFrom(const VariableNameMap &outputs, static T *OutputFrom(const VariableNameMap &outputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("Output", outputs, scope); return GetVarValue<T>("Output", outputs, scope);
} }
template <typename T> template<typename T>
static T *OutFrom(const VariableNameMap &outputs, static T *OutFrom(const VariableNameMap &outputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("Out", outputs, scope); return GetVarValue<T>("Out", outputs, scope);
} }
template <typename T> template<typename T>
static T *FilterFrom(const VariableNameMap &inputs, static T *FilterFrom(const VariableNameMap &inputs,
const Scope &scope) { const Scope &scope) {
return GetVarValue<T>("Filter", inputs, scope); return GetVarValue<T>("Filter", inputs, scope);
} }
template <typename T> template<typename T>
static const T GetAttr(std::string key, const AttributeMap &map) { static const T GetAttr(std::string key, const AttributeMap &map) {
return ((Attribute)map.at(key)).Get<T>(); return ((Attribute) map.at(key)).Get<T>();
} }
template <typename T> template<typename T>
static T *GetVarValue(std::string key, static T *GetVarValue(std::string key,
const VariableNameMap &var_map, const VariableNameMap &var_map,
const Scope &scope) { const Scope &scope) {
...@@ -95,7 +95,7 @@ namespace paddle_mobile { ...@@ -95,7 +95,7 @@ namespace paddle_mobile {
} }
} }
template <typename T> template<typename T>
static std::vector<T *> static std::vector<T *>
GetMultiVarValue(std::string key, const VariableNameMap &var_map, GetMultiVarValue(std::string key, const VariableNameMap &var_map,
const Scope &scope) { const Scope &scope) {
...@@ -108,10 +108,10 @@ namespace paddle_mobile { ...@@ -108,10 +108,10 @@ namespace paddle_mobile {
} }
return var_res; return var_res;
} }
}; };
class ConvParam : OpParam { class ConvParam : OpParam {
public: public:
ConvParam(const VariableNameMap &inputs, ConvParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs, const framework::AttributeMap &attrs,
...@@ -139,7 +139,7 @@ namespace paddle_mobile { ...@@ -139,7 +139,7 @@ namespace paddle_mobile {
const int &Groups() const { return groups; } const int &Groups() const { return groups; }
private: private:
Tensor *input_; Tensor *input_;
Tensor *output_; Tensor *output_;
LoDTensor *filter_; LoDTensor *filter_;
...@@ -147,12 +147,12 @@ namespace paddle_mobile { ...@@ -147,12 +147,12 @@ namespace paddle_mobile {
std::vector<int> paddings_; std::vector<int> paddings_;
std::vector<int> dilations_; std::vector<int> dilations_;
int groups; int groups;
}; };
Print &operator<<(Print &printer, const ConvParam &conv_param); Print &operator<<(Print &printer, const ConvParam &conv_param);
class ElementwiseAddParam : OpParam { class ElementwiseAddParam : OpParam {
public: public:
ElementwiseAddParam(const VariableNameMap &inputs, ElementwiseAddParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs, const framework::AttributeMap &attrs,
...@@ -171,15 +171,15 @@ namespace paddle_mobile { ...@@ -171,15 +171,15 @@ namespace paddle_mobile {
const int &Axis() const { return axis_; } const int &Axis() const { return axis_; }
private: private:
Tensor *input_x_; Tensor *input_x_;
Tensor *input_y_; Tensor *input_y_;
Tensor *out_; Tensor *out_;
int axis_; int axis_;
}; };
class MulParam : OpParam { class MulParam : OpParam {
public: public:
MulParam(const VariableNameMap &inputs, MulParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs, const framework::AttributeMap &attrs,
...@@ -201,16 +201,16 @@ namespace paddle_mobile { ...@@ -201,16 +201,16 @@ namespace paddle_mobile {
const int &YNumColDims() const { return y_num_col_dims_; } const int &YNumColDims() const { return y_num_col_dims_; }
private: private:
Tensor *input_x_; Tensor *input_x_;
Tensor *input_y_; Tensor *input_y_;
Tensor *out_; Tensor *out_;
int x_num_col_dims_; int x_num_col_dims_;
int y_num_col_dims_; int y_num_col_dims_;
}; };
class ConcatParam : public OpParam { class ConcatParam : public OpParam {
public: public:
ConcatParam(const VariableNameMap &inputs, ConcatParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs, const framework::AttributeMap &attrs,
...@@ -226,11 +226,11 @@ namespace paddle_mobile { ...@@ -226,11 +226,11 @@ namespace paddle_mobile {
const int &Axis() const { return axis_; } const int &Axis() const { return axis_; }
private: private:
std::vector<Tensor *> inputs_; std::vector<Tensor *> inputs_;
Tensor *out_; Tensor *out_;
int axis_; int axis_;
}; };
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
#!/bin/bash
# pre-commit hook: run clang-tidy over every staged C/C++ source under src/,
# accumulating the tool's exit statuses so any failure fails the commit.
TOTAL_ERRORS=0

# List staged files, skipping deletions (https://stackoverflow.com/a/2413151),
# keeping only paths under src and excluding generated protobuf sources.
for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}' | grep "src" | grep -v ".pb."); do
    echo "clang-tidy formatting $file"
    # Quote the path so filenames containing shell metacharacters survive.
    clang-tidy "$file"
    # Shell arithmetic replaces the external `expr` call; $? is the
    # clang-tidy exit status from the line above.
    TOTAL_ERRORS=$((TOTAL_ERRORS + $?))
done

exit $TOTAL_ERRORS
#!/bin/bash
# pre-commit helper: verify that the installed clang-format matches the
# required major version, then forward all arguments to it unchanged.
set -e

readonly VERSION="3.8"

version=$(clang-format -version)

if ! [[ $version == *"$VERSION"* ]]; then
    echo "clang-format version check failed."
    echo "a version contains '$VERSION' is needed, but get '$version'"
    echo "you can install the right version, and make an soft-link to '\$PATH' env"
    # Exit codes are 0-255: `exit -1` is non-portable and wraps to 255.
    exit 1
fi

# "$@" (quoted) preserves each original argument, including paths with spaces;
# the original unquoted $@ would re-split them.
clang-format "$@"
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册