Unverified commit 8f7b020b, authored by Wu Yi, committed by GitHub

fix develop build issue (#10978)

* fix develop build issue

* fix google style

* cpplint check only fluid
Parent 8075a11f
@@ -19,7 +19,7 @@ BasedOnStyle: Google
 IndentWidth: 2
 TabWidth: 2
 ContinuationIndentWidth: 4
-AccessModifierOffset: -2  # The private/protected/public has no indent in class
+AccessModifierOffset: -1  # The private/protected/public has no indent in class
 Standard: Cpp11
 AllowAllParametersOfDeclarationOnNextLine: true
 BinPackParameters: false
......
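For context (an illustration, not part of the commit): with BasedOnStyle: Google and IndentWidth: 2, AccessModifierOffset controls how far access specifiers sit from the class margin. The old -2 left them flush with the class keyword; the new -1 gives them the one-space indent that Google style and cpplint expect, which is what produces the many whitespace-only hunks below. A sketch with a hypothetical class:

    // AccessModifierOffset: -2 (before): specifiers flush left
    class Example {
    public:
      void run();
    };

    // AccessModifierOffset: -1 (after): specifiers indented one space
    // (IndentWidth 2 plus offset -1), matching cpplint's expectation
    class Example {
     public:
      void run();
    };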
@@ -94,7 +94,7 @@ void UpdateCallback::apply(Parameter* p) {
 }
 class UpdateCallbackWrapper {
-public:
+ public:
   explicit UpdateCallbackWrapper(const UpdateCallback& callback)
       : callback(const_cast<UpdateCallback&>(callback)) {}
@@ -105,7 +105,7 @@ public:
     delete p;
   }
-private:
+ private:
   UpdateCallback& callback;
 };
......
@@ -59,9 +59,10 @@ class RangeError {};
 /// Not support Error, such as access GPU memory directly, etc.
 class UnsupportError : public std::runtime_error {
-public:
-  UnsupportError() : std::runtime_error(" "){};
-  UnsupportError(const std::string& message) : std::runtime_error(message){};
+ public:
+  UnsupportError() : std::runtime_error(" ") {}
+  explicit UnsupportError(const std::string& message)
+      : std::runtime_error(message) {}
 };
 /// This type will map to python's list of float.
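Besides the reindent, two substantive fixes land in this hunk: the stray semicolon after each constructor body ({}; becomes {}) is dropped, and the one-argument constructor gains explicit, both of which cpplint flags. A hedged illustration of what explicit now prevents (report is a hypothetical call site, not from the diff):

    // UnsupportError as defined above; report() is hypothetical.
    void report(const UnsupportError& e);

    std::string msg = "unsupported operation";
    report(UnsupportError(msg));  // fine: explicit construction
    // report(msg);               // no longer compiles: explicit blocks the
    //                            // implicit std::string -> UnsupportError conversion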
@@ -105,7 +106,7 @@ class Matrix {
   DISABLE_COPY(Matrix);
   static Matrix* createByPaddleMatrixPtr(void* sharedPtr);
-public:
+ public:
   virtual ~Matrix();
   /**
@@ -231,7 +232,7 @@ public:
   bool isGpu() const;
-private:
+ private:
   void* getSharedPtr() const;
   MatrixPrivate* m;
@@ -248,7 +249,7 @@ class Vector {
   void* getSharedPtr();
-public:
+ public:
   ~Vector();
   /// Create Vector filled with zero.
@@ -310,10 +311,10 @@ public:
   /// __len__ in python
   size_t getSize() const;
-private:
+ private:
   VectorPrivate* m;
-private:
+ private:
   friend class Parameter;
   friend class ParameterOptimizer;
   friend struct ParameterTraverseCallbackPrivate;
@@ -325,7 +326,7 @@ class IVector {
   DISABLE_COPY(IVector);
   static IVector* createByPaddleVectorPtr(void* ptr);
-public:
+ public:
   /// Create IVector filled with zero
   static IVector* createZero(size_t sz, bool useGpu = isUsingGpu());
@@ -389,7 +390,7 @@ public:
   /// This method will map to python __len__();
   size_t getSize() const;
-private:
+ private:
   void* getSharedPtr() const;
   friend class Arguments;
@@ -400,11 +401,11 @@ struct ArgumentsPrivate;
 /// The Arguments is actual a std::vector<paddle::Argument> in paddle.
 class Arguments {
-private:
+ private:
   Arguments();  // Internal Create.
   DISABLE_COPY(Arguments);
-public:
+ public:
   /**
    * Create a arguments with size.
    * Note that it can be zero.
@@ -475,12 +476,12 @@ public:
   float sum() const;
-private:
+ private:
   static Arguments* createByPaddleArgumentVector(void* ptr);
   static Arguments* createByPaddleArgument(const void* ptr);
   void* getInternalArgumentsPtr() const;
-private:
+ private:
   ArgumentsPrivate* m;
   friend class Trainer;
   friend class GradientMachine;
@@ -507,7 +508,7 @@ class ParameterConfig {
   static ParameterConfig* createParameterConfigFromParameterPtr(void* ptr);
   void* getRawPtr();
-public:
+ public:
   ~ParameterConfig();
   /**
@@ -515,10 +516,10 @@ public:
    */
   std::string toProtoString() const;
-private:
+ private:
   ParameterConfigPrivate* m;
-private:
+ private:
   friend class Parameter;
   friend class ParameterOptimizer;
   friend struct ParameterTraverseCallbackPrivate;
@@ -529,7 +530,7 @@ class OptimizationConfig {
   DISABLE_COPY(OptimizationConfig);
   OptimizationConfig();
-public:
+ public:
   static OptimizationConfig* createFromProtoString(const std::string& str);
   ~OptimizationConfig();
@@ -538,7 +539,7 @@ public:
    */
   std::string toProtoString();
-private:
+ private:
   OptimizationConfigPrivate* m;
   friend class TrainerConfig;
@@ -549,11 +550,11 @@ private:
 struct ParameterPrivate;
 class Parameter {
-private:
+ private:
   Parameter();
   DISABLE_COPY(Parameter);
-public:
+ public:
   virtual ~Parameter();
   /**
@@ -580,11 +581,11 @@ public:
   size_t getSize() const;
-private:
+ private:
   static Parameter* createFromRawPtr(void* ptr);
   static Parameter* createFromSharedPtr(void* ptr);
-private:
+ private:
   ParameterPrivate* m;
   friend class UpdateCallbackWrapper;
   friend class GradientMachine;
@@ -598,14 +599,14 @@ struct ModelConfigPrivate;
  * It is used by GradientMachine.
  */
 class ModelConfig {
-private:
+ private:
   ModelConfig();
   DISABLE_COPY(ModelConfig);
-public:
+ public:
   virtual ~ModelConfig();
-private:
+ private:
   ModelConfigPrivate* m;
   friend class TrainerConfig;
   friend struct TrainerConfigPrivate;
@@ -619,11 +620,11 @@ struct TrainerConfigPrivate;
  * It is used by GradientMachine.
  */
 class TrainerConfig {
-private:
+ private:
   TrainerConfig();
   DISABLE_COPY(TrainerConfig);
-public:
+ public:
   virtual ~TrainerConfig();
   static TrainerConfig* createFromTrainerConfigFile(
@@ -634,7 +635,7 @@ public:
   OptimizationConfig* getOptimizationConfig() const;
-private:
+ private:
   TrainerConfigPrivate* m;
   friend class Trainer;
 };
@@ -654,7 +655,7 @@ private:
  * @endcode
  */
 class UpdateCallback {
-public:
+ public:
   virtual ~UpdateCallback();
   virtual void apply(Parameter* p);
 };
@@ -664,14 +665,14 @@ class ParameterTraverseCallback {
   DISABLE_COPY(ParameterTraverseCallback);
   ParameterTraverseCallback();
-public:
+ public:
   ~ParameterTraverseCallback();
   void apply(const std::vector<Vector*>& vecs,
              const ParameterConfig& config,
              size_t sparseId);
-private:
+ private:
   ParameterTraverseCallbackPrivate* m;
   friend class ParameterOptimizer;
 };
@@ -686,7 +687,7 @@ class ParameterOptimizer {
   DISABLE_COPY(ParameterOptimizer);
   ParameterOptimizer();
-public:
+ public:
   static ParameterOptimizer* create(OptimizationConfig* config);
   ~ParameterOptimizer();
@@ -710,7 +711,7 @@ public:
   ParameterTraverseCallback* needSpecialTraversal(
       const ParameterConfig& config) const;
-private:
+ private:
   ParameterOptimizerPrivate* m;
 };
@@ -718,11 +719,11 @@ class SequenceGenerator;
 class Evaluator;
 struct GradientMachinePrivate;
 class GradientMachine {
-private:
+ private:
   GradientMachine();
   DISABLE_COPY(GradientMachine);
-public:
+ public:
   virtual ~GradientMachine();
   /**
@@ -817,7 +818,7 @@ public:
   void eval(Evaluator* evaluator);
-private:
+ private:
   GradientMachinePrivate* m;
   static GradientMachine* createFromPaddleModelPtr(
@@ -833,10 +834,10 @@ private:
 struct ParameterUpdaterPrivate;
 class ParameterUpdater {
-private:
+ private:
   ParameterUpdater();
-public:
+ public:
   static ParameterUpdater* createLocalUpdater(OptimizationConfig* config);
   static ParameterUpdater* createRemoteUpdater(OptimizationConfig* config,
                                                int passCount,
@@ -911,17 +912,17 @@ public:
    */
   void catchUpWith();
-private:
+ private:
   ParameterUpdaterPrivate* m;
 };
 struct EvaluatorPrivate;
 class Evaluator {
-private:
+ private:
   Evaluator();
   DISABLE_COPY(Evaluator);
-public:
+ public:
   ~Evaluator();
   /**
@@ -945,7 +946,7 @@ public:
   double getValue(const std::string name) const;
-private:
+ private:
   EvaluatorPrivate* m;
   friend class GradientMachine;
@@ -953,13 +954,13 @@ private:
 struct TrainerPrivate;
 class Trainer {
-private:
+ private:
   TrainerPrivate* m;
   Trainer();
   Trainer(TrainerConfig* optConfig, GradientMachine* gm);
   DISABLE_COPY(Trainer);
-public:
+ public:
   virtual ~Trainer();
   /// Create A Trainer By TrainerConfig. using paddle command line.
@@ -1002,7 +1003,7 @@ public:
 /// the N-Best results generated from one input sequence.
 class ISequenceResults {
-public:
+ public:
   virtual ~ISequenceResults();
   /// Number of result.
@@ -1026,7 +1027,7 @@ class SequenceGenerator {
   DISABLE_COPY(SequenceGenerator);
   SequenceGenerator();
-public:
+ public:
   virtual ~SequenceGenerator();
   /**
@@ -1044,10 +1045,10 @@ public:
   void setMaxLength(size_t maxlength);
   void setBeamSize(size_t beamSize);
-private:
+ private:
   static SequenceGenerator* createByGradientMachineSharedPtr(void* ptr);
   friend class GradientMachine;
-private:
+ private:
   SequenceGeneratorPrivate* m;
 };
@@ -138,7 +138,7 @@ struct SequenceGeneratorPrivate {
         maxLength(0UL),
         feedback(__create_feedback__()) {}
-private:
+ private:
   static paddle::Argument __create_feedback__() {
     paddle::Argument feedback;
     feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu);
@@ -157,7 +157,7 @@ SequenceGenerator::~SequenceGenerator() { delete m; }
 class PathSequenceResults : public ISequenceResults {
   // ISequenceResults interface
-public:
+ public:
   PathSequenceResults(const std::shared_ptr<std::vector<Path>>& path,
                       const std::shared_ptr<std::vector<std::string>>& dict)
       : path_(path), dict_(dict) {}
@@ -196,7 +196,7 @@ public:
     }
   }
-private:
+ private:
   std::shared_ptr<std::vector<Path>> path_;
   std::shared_ptr<std::vector<std::string>> dict_;
 };
......
@@ -26,7 +26,7 @@ enum GradientMatchineCreateMode {
 namespace paddle {
 class MyNeuralNetwork : public NeuralNetwork {
-public:
+ public:
   MyNeuralNetwork(const std::string& name, NeuralNetwork* network)
       : NeuralNetwork(name, network) {}
 };
......
@@ -50,7 +50,7 @@ struct PaddleTensor {
  * TODO(Superjomn) Prepare another API for NLP-related usages.
  */
 class PaddlePredictor {
-public:
+ public:
   struct Config;
   PaddlePredictor() = default;
   PaddlePredictor(const PaddlePredictor&) = delete;
@@ -66,6 +66,7 @@ public:
   // be thread-safe.
   virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
+  virtual bool InitShared() { return false; }
   // Destroy the Predictor.
   virtual ~PaddlePredictor() {}
......
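The one non-whitespace change in this header adds InitShared to the PaddlePredictor base class as a plain virtual with a default body rather than a pure virtual, so existing subclasses that never override it (such as DemoPredictor further down) keep compiling. A minimal sketch with toy classes, not the real Paddle interface:

    class BasePredictor {
     public:
      virtual ~BasePredictor() {}
      virtual bool InitShared() { return false; }  // defaulted, not pure
    };

    class LegacyPredictor : public BasePredictor {};  // no override needed

    bool check() {
      LegacyPredictor p;
      return p.InitShared();  // inherits the default and returns false
    }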
@@ -28,7 +28,7 @@ namespace {
 // Timer for timer
 class Timer {
-public:
+ public:
   double start;
   double startu;
   void tic() {
@@ -135,8 +135,8 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
 std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
   VLOG(3) << "Predictor::clone";
-  std::unique_ptr<PaddlePredictorImpl> cls(new PaddlePredictorImpl(config_));
-  if (!cls->InitShared(this)) {
+  std::unique_ptr<PaddlePredictor> cls(new PaddlePredictorImpl(config_));
+  if (!cls->InitShared()) {
     LOG(ERROR) << "fail to call InitShared";
     return nullptr;
   }
@@ -144,7 +144,7 @@ std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
 }
 // TODO(panyx0718): Consider merge with Init()?
-bool PaddlePredictorImpl::InitShared(PaddlePredictorImpl *cls) {
+bool PaddlePredictorImpl::InitShared() {
   VLOG(3) << "Predictor::init_shared";
   // 1. Define place, executor, scope
   if (this->config_.device >= 0) {
......
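With InitShared on the base class, Clone() can hold the fresh copy as a std::unique_ptr<PaddlePredictor> and still reach the derived implementation through virtual dispatch, which is what the two-line change above does. A self-contained sketch of the pattern (Base and Impl are hypothetical stand-ins for PaddlePredictor and PaddlePredictorImpl):

    #include <memory>

    class Base {
     public:
      virtual ~Base() {}
      virtual bool InitShared() { return false; }
    };

    class Impl : public Base {
     public:
      bool InitShared() override { return true; }  // real setup would go here
    };

    std::unique_ptr<Base> Clone() {
      std::unique_ptr<Base> cls(new Impl);  // hold the clone as the base type
      if (!cls->InitShared()) {             // dispatches to Impl::InitShared()
        return nullptr;                     // mirrors the LOG(ERROR) path above
      }
      return cls;
    }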
@@ -41,7 +41,7 @@ struct VisConfig : public PaddlePredictor::Config {
  * Do not use this, just a demo indicating how to customize a Predictor.
  */
 class PaddlePredictorImpl : public PaddlePredictor {
-public:
+ public:
   explicit PaddlePredictorImpl(const VisConfig &config) : config_(config) {}
   bool Init();
@@ -53,8 +53,8 @@ public:
   ~PaddlePredictorImpl() override{};
-private:
-  bool InitShared(PaddlePredictorImpl *cls);
+ private:
+  bool InitShared();
   bool SetFeed(const std::vector<PaddleTensor> &input_datas,
                std::vector<paddle::framework::LoDTensor> *feeds);
   bool GetFetch(const std::vector<paddle::framework::LoDTensor> &fetchs,
......
@@ -31,7 +31,7 @@ struct DemoConfig : public PaddlePredictor::Config {
  * Do not use this, just a demo indicating how to customize a Predictor.
  */
 class DemoPredictor : public PaddlePredictor {
-public:
+ public:
   explicit DemoPredictor(const DemoConfig &config) {
     LOG(INFO) << "I get other_config " << config.other_config;
   }
......
@@ -31,7 +31,7 @@ namespace hppl {
  */
 template <class T>
 class Active {
-public:
+ public:
   typedef T (*forward)(T);
   typedef T (*backward)(T, T);
 };
......
@@ -23,128 +23,128 @@ namespace unary {
 template <class T>
 class add_scale {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE add_scale(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a + p; }
 };
 template <class T>
 class sub_scale {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE sub_scale(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a - p; }
 };
 template <class T>
 class mul_scale {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE mul_scale(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a * p; }
 };
 template <class T>
 class div_scale {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE div_scale(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a / p; }
 };
 template <class T>
 class neg {
-public:
+ public:
   INLINE T operator()(const T a) const { return -a; }
 };
 template <class T>
 class exp_op {
-public:
+ public:
   INLINE T operator()(const T a) const { return std::exp(a); }
 };
 template <class T>
 class log_op {
-public:
+ public:
   INLINE T operator()(const T a) const { return std::log(a); }
 };
 template <class T>
 class sqrt_op {
-public:
+ public:
   INLINE T operator()(const T a) const { return std::sqrt(a); }
 };
 template <class T>
 class square {
-public:
+ public:
   INLINE T operator()(const T a) const { return a * a; }
 };
 template <class T>
 class reciprocal {
-public:
+ public:
   INLINE T operator()(const T a) const { return T(1) / a; }
 };
 template <class T>
 class abs {
-public:
+ public:
   INLINE T operator()(const T a) const { return a > 0 ? a : -a; }
 };
 template <class T>
 class sign {
-public:
+ public:
   INLINE T operator()(const T a) const { return (a > 0) - (a < 0); }
 };
 template <class T>
 class min {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE min(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a > p ? p : a; }
 };
 template <class T>
 class max {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE max(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return a < p ? p : a; }
 };
 template <class T>
 class pow_op {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE pow_op(const T s) : p(s) {}
   INLINE T operator()(const T a) const { return std::pow(a, p); }
 };
 template <class T>
 class constant {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE constant(const T s) : p(s) {}
   INLINE T operator()(int i) const { return p; }
   INLINE T operator()(int i, int j) const { return p; }
@@ -152,80 +152,80 @@ public:
 template <class T>
 class cmp_eq {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_eq(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a == p; }
 };
 template <class T>
 class cmp_ne {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_ne(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a != p; }
 };
 template <class T>
 class cmp_le {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_le(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a <= p; }
 };
 template <class T>
 class cmp_lt {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_lt(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a < p; }
 };
 template <class T>
 class cmp_ge {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_ge(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a >= p; }
 };
 template <class T>
 class cmp_gt {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE cmp_gt(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a > p; }
 };
 template <class T>
 class and_op {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE and_op(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a && p; }
 };
 template <class T>
 class or_op {
-private:
+ private:
   const T p;
-public:
+ public:
   INLINE or_op(const T s) : p(s) {}
   INLINE bool operator()(const T a) const { return a || p; }
 };
@@ -235,96 +235,96 @@ public:
 namespace binary {
 template <class T>
 class add {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a + b; }
 };
 template <class T>
 class add_scale {
-private:
+ private:
   const T p1;
   const T p2;
-public:
+ public:
   INLINE add_scale(const T s1, const T s2) : p1(s1), p2(s2) {}
   INLINE T operator()(const T a, const T b) const { return p1 * a + p2 * b; }
 };
 template <class T>
 class sub {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a - b; }
 };
 template <class T>
 class mul {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a * b; }
 };
 template <class T>
 class div {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a / b; }
 };
 template <class T>
 class cmp_eq {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a == b; }
 };
 template <class T>
 class cmp_ne {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a != b; }
 };
 template <class T>
 class cmp_le {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a <= b; }
 };
 template <class T>
 class cmp_lt {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a < b; }
 };
 template <class T>
 class cmp_ge {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a >= b; }
 };
 template <class T>
 class cmp_gt {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a > b; }
 };
 template <class T>
 class and_op {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a && b; }
 };
 template <class T>
 class or_op {
-public:
+ public:
   INLINE bool operator()(const T a, const T b) const { return a || b; }
 };
 template <class T>
 class min {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a > b ? b : a; }
 };
 template <class T>
 class max {
-public:
+ public:
   INLINE T operator()(const T a, const T b) const { return a < b ? b : a; }
 };
@@ -332,7 +332,7 @@ public:
 #ifndef PADDLE_TYPE_DOUBLE
 template <>
 class add<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_add_ps(a, b);
   }
@@ -340,11 +340,11 @@ public:
 template <>
 class add_scale<__m128> {
-private:
+ private:
   const __m128 p1;
   const __m128 p2;
-public:
+ public:
   INLINE add_scale(const __m128 s1, const __m128 s2) : p1(s1), p2(s2) {}
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_add_ps(_mm_mul_ps(p1, a), _mm_mul_ps(p2, b));
@@ -353,7 +353,7 @@ public:
 template <>
 class sub<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_sub_ps(a, b);
   }
@@ -361,7 +361,7 @@ public:
 template <>
 class mul<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_mul_ps(a, b);
   }
@@ -369,7 +369,7 @@ public:
 template <>
 class div<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_div_ps(a, b);
   }
@@ -377,7 +377,7 @@ public:
 template <>
 class min<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_min_ps(a, b);
   }
@@ -385,7 +385,7 @@ public:
 template <>
 class max<__m128> {
-public:
+ public:
   INLINE __m128 operator()(const __m128 a, const __m128 b) const {
     return _mm_max_ps(a, b);
   }
@@ -393,7 +393,7 @@ public:
 #else
 template <>
 class add<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_add_pd(a, b);
   }
@@ -401,11 +401,11 @@ public:
 template <>
 class add_scale<__m128d> {
-private:
+ private:
   const __m128d p1;
   const __m128d p2;
-public:
+ public:
   INLINE add_scale(const __m128d s1, const __m128d s2) : p1(s1), p2(s2) {}
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_add_pd(_mm_mul_pd(p1, a), _mm_mul_pd(p2, b));
@@ -414,7 +414,7 @@ public:
 template <>
 class sub<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_sub_pd(a, b);
   }
@@ -422,7 +422,7 @@ public:
 template <>
 class mul<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_mul_pd(a, b);
   }
@@ -430,7 +430,7 @@ public:
 template <>
 class div<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_div_pd(a, b);
   }
@@ -438,7 +438,7 @@ public:
 template <>
 class min<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_min_pd(a, b);
   }
@@ -446,7 +446,7 @@ public:
 template <>
 class max<__m128d> {
-public:
+ public:
   INLINE __m128d operator()(const __m128d a, const __m128d b) const {
     return _mm_max_pd(a, b);
   }
@@ -458,7 +458,7 @@ public:
 #ifndef PADDLE_TYPE_DOUBLE
 template <>
 class add<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     return vaddq_f32(a, b);
@@ -467,11 +467,11 @@ public:
 template <>
 class add_scale<float32x4_t> {
-private:
+ private:
   const float32x4_t p1;
   const float32x4_t p2;
-public:
+ public:
   INLINE add_scale(const float32x4_t s1, const float32x4_t s2)
       : p1(s1), p2(s2) {}
   INLINE float32x4_t operator()(const float32x4_t a,
@@ -482,7 +482,7 @@ public:
 template <>
 class sub<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     return vsubq_f32(a, b);
@@ -491,7 +491,7 @@ public:
 template <>
 class mul<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     return vmulq_f32(a, b);
@@ -500,7 +500,7 @@ public:
 template <>
 class div<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     float32x4_t tmp = vrecpeq_f32(b);
@@ -510,7 +510,7 @@ public:
 template <>
 class min<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     return vminq_f32(a, b);
@@ -519,7 +519,7 @@ public:
 template <>
 class max<float32x4_t> {
-public:
+ public:
   INLINE float32x4_t operator()(const float32x4_t a,
                                 const float32x4_t b) const {
     return vmaxq_f32(a, b);
......
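Every hunk in this header is the same one-space reindent across the unary/binary functor templates and their SSE/NEON specializations. For orientation, these functors capture a scalar and are applied elementwise; a hypothetical helper sketch (applyUnary is illustrative, not a function in this header):

    template <class Op, class T>
    void applyUnary(Op op, T* data, int len) {
      for (int i = 0; i < len; ++i) {
        data[i] = op(data[i]);  // e.g. unary::add_scale<T> returns data[i] + p
      }
    }

    void example() {
      float v[4] = {1.f, 2.f, 3.f, 4.f};
      applyUnary(unary::add_scale<float>(0.5f), v, 4);  // each v[i] becomes v[i] + 0.5f
    }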
@@ -30,7 +30,7 @@ bool hl_lstm_sequence_parallel(int frameSize) {
 }
 class frameValue {
-public:
+ public:
   real *value_;
   __device__ frameValue(real *value) : value_(value) {}
   template <int reversed, int frameSize>
......
@@ -33,7 +33,7 @@ namespace paddle {
  * \param outputs[0] Image data of NCHW format.
  */
 class BlockExpandFunction : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     strides_ = config.get<std::vector<size_t>>("strides");
@@ -81,7 +81,7 @@ public:
                      (size_t)blockW()});
   }
-protected:
+ protected:
   std::vector<size_t> strides_;
   std::vector<size_t> paddings_;
   std::vector<size_t> blocks_;
@@ -101,7 +101,7 @@ protected:
 template <DeviceType Device>
 class BlockExpandForward : public BlockExpandFunction {
-public:
+ public:
   void init(const FuncConfig& config) override {
     BlockExpandFunction::init(config);
   }
@@ -149,7 +149,7 @@ public:
 template <DeviceType Device>
 class BlockExpandBackward : public BlockExpandFunction {
-public:
+ public:
   void init(const FuncConfig& config) override {
     BlockExpandFunction::init(config);
   }
......
@@ -63,12 +63,12 @@ enum ArgType {
   ADD_TO = 2,
 };
 class BufferArg {
-public:
+ public:
   void setArgType(ArgType argType) { argType_ = argType; }
   ArgType getArgType() const { return argType_; }
-public:
+ public:
   BufferArg(ValueType valueType,
             const TensorShape& shape,
             ArgType argType = UNSPECIFIED)
@@ -169,7 +169,7 @@ public:
   const SequenceArg& sequence() const;
   const SparseMatrixArg& sparse() const;
-protected:
+ protected:
   void* buf_;
   ValueType valueType_;
   TensorShape shape_;
@@ -185,7 +185,7 @@ protected:
 // valueType_ = int32
 // if a < b then value_.buf_[a] < value_.buf_[b]
 class SequenceIdArg : public BufferArg {
-public:
+ public:
   SequenceIdArg(const TensorShape& shape, ArgType argType = UNSPECIFIED)
       : BufferArg(VALUE_TYPE_INT32, shape, argType) {
     bufferType_ = TENSOR_SEQUENCE_ID;
@@ -212,7 +212,7 @@ public:
   size_t numSeqs() const { return numSeqs_; }
-private:
+ private:
   size_t numSeqs_;
 };
@@ -222,7 +222,7 @@ private:
 // SequenceArg can be used to represent sequences that contain multiple
 // unequal lengths.
 class SequenceArg : public BufferArg {
-public:
+ public:
   SequenceArg(ValueType valueType,
               const TensorShape& shape,
               ArgType argType = UNSPECIFIED)
@@ -255,7 +255,7 @@ public:
   SequenceIdArg& getSequenceId() { return startPositions_; }
   const SequenceIdArg& getSequenceId() const { return startPositions_; }
-private:
+ private:
   SequenceIdArg startPositions_;
 };
@@ -263,7 +263,7 @@ private:
 // valueType_ == float or double
 // shape_.ndims() == 2
 class SparseMatrixArg : public BufferArg {
-public:
+ public:
   SparseMatrixArg(void* buf,
                   ValueType valueType,
                   const TensorShape& shape,
@@ -353,7 +353,7 @@ public:
   SparseDataType dataType() const { return type_; }
-private:
+ private:
   BufferArg row_;
   BufferArg col_;
   size_t nnz_;
......
@@ -100,7 +100,7 @@ void ContextProjectionForward<DEVICE_TYPE_CPU>(CpuMatrix& out_mat,
  */
 template <DeviceType Device>
 class ContextProjectionForwardFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -146,7 +146,7 @@ public:
                                     begin_pad_);
   }
-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;
@@ -223,7 +223,7 @@ void ContextProjectionBackward<DEVICE_TYPE_CPU>(const CpuMatrix& out_grad_mat,
  */
 template <DeviceType Device>
 class ContextProjectionBackwardFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -278,7 +278,7 @@ public:
                                      total_pad_);
   }
-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;
@@ -299,7 +299,7 @@ private:
  */
 template <DeviceType Device>
 class ContextProjectionBackwardDataFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -331,7 +331,7 @@ public:
         out_grad_mat, in_grad_mat, seq_vec, context_length_, context_start_);
   }
-private:
+ private:
   size_t context_length_;
   int context_start_;
 };
@@ -348,7 +348,7 @@ private:
  */
 template <DeviceType Device>
 class ContextProjectionBackwardWeightFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -382,7 +382,7 @@ public:
                                         begin_pad_);
   }
-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;
......
@@ -56,7 +56,7 @@ namespace paddle {
  * H and W is height and width of filter.
  */
 class ConvFunctionBase : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     strides_ = config.get<std::vector<size_t>>("strides");
@@ -101,7 +101,7 @@ public:
     }
   }
-protected:
+ protected:
   size_t getFilterHeight(const TensorShape& filter) const {
     return filter[filter.ndims() - 2];
   }
......
@@ -97,7 +97,7 @@ class CosSimForwardFunc : public FunctionBase {
     CosSimForward<Device>(out_mat, in1_mat, in2_mat, scale_);
   }
-private:
+ private:
   real scale_;
 };
@@ -227,7 +227,7 @@ class CosSimBackwardFunc : public FunctionBase {
         out_grad, out_val, in1_val, in2_val, in1_grad, in2_grad, scale_);
   }
-private:
+ private:
   real scale_;
 };
......
@@ -112,7 +112,7 @@ void CropGrad<DEVICE_TYPE_CPU>(const real* inGrad,
  */
 template <DeviceType Device>
 class CropFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override { conf_ = config; }
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
@@ -130,7 +130,7 @@ public:
                       conf_);
   }
-private:
+ private:
   FuncConfig conf_;
 };
@@ -145,7 +145,7 @@ private:
 template <DeviceType Device>
 class CropGradFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override { conf_ = config; }
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
@@ -163,7 +163,7 @@ public:
                           conf_);
   }
-private:
+ private:
   FuncConfig conf_;
 };
......
@@ -160,7 +160,7 @@ void CrossMapNormalGrad<DEVICE_TYPE_CPU>(real* inputsGrad,
  */
 template <DeviceType Device>
 class CrossMapNormalFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     size_ = config.get<size_t>("size");
@@ -220,7 +220,7 @@ public:
     return ops;
   }
-private:
+ private:
   size_t size_;
   real scale_;
   real pow_;
@@ -260,7 +260,7 @@ private:
  */
 template <DeviceType Device>
 class CrossMapNormalGradFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     size_ = config.get<size_t>("size");
@@ -328,7 +328,7 @@ public:
     return ops;
   }
-private:
+ private:
   size_t size_;
   real scale_;
   real pow_;
......
@@ -19,7 +19,7 @@ namespace paddle {
 template <class T>
 class DepthwiseConvFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -43,7 +43,7 @@ public:
 template <class T>
 class DepthwiseConvGradInputFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -66,7 +66,7 @@ public:
 template <class T>
 class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,
@@ -93,7 +93,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }
@@ -156,7 +156,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvGradInputFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }
@@ -220,7 +220,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvGradFilterFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }
......
@@ -44,7 +44,7 @@ namespace paddle {
  */
 template <DeviceType Device, class T>
 class DepthwiseConvFunctor {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -89,7 +89,7 @@ public:
  */
 template <DeviceType Device, class T>
 class DepthwiseConvGradInputFunctor {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -135,7 +135,7 @@ public:
  */
 template <DeviceType Device, class T>
 class DepthwiseConvGradFilterFunctor {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,
......
@@ -199,7 +199,7 @@ __global__ void ConvolutionDepthwiseFilterBackward(const int num_i,
 template <class T>
 class DepthwiseConvFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -249,7 +249,7 @@ public:
 template <class T>
 class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -300,7 +300,7 @@ public:
 template <class T>
 class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,
......
...@@ -46,7 +46,7 @@ int GetCpuCount() { return 1; } ...@@ -46,7 +46,7 @@ int GetCpuCount() { return 1; }
#endif #endif
class EigenDeviceWarpper { class EigenDeviceWarpper {
public: // NOLINT public: // NOLINT
#if EIGEN_USE_THREADS #if EIGEN_USE_THREADS
static Eigen::ThreadPoolDevice* device() { static Eigen::ThreadPoolDevice* device() {
const int num_cpus = GetCpuCount(); const int num_cpus = GetCpuCount();
......
...@@ -29,7 +29,7 @@ namespace paddle { ...@@ -29,7 +29,7 @@ namespace paddle {
* The argument type of Function::init. * The argument type of Function::init.
*/ */
class FuncConfig { class FuncConfig {
public: public:
template <typename T> template <typename T>
T get(const std::string& key, Error* err = nullptr) const { T get(const std::string& key, Error* err = nullptr) const {
try { try {
...@@ -59,7 +59,7 @@ public: ...@@ -59,7 +59,7 @@ public:
return *this; return *this;
} }
protected: protected:
mutable std::unordered_map<std::string, any> valueMap_; mutable std::unordered_map<std::string, any> valueMap_;
}; };
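FuncConfig is a string-keyed bag of values: get&lt;T&gt; is shown above, and the `return *this;` in the setter suggests chaining. A hedged usage sketch; the `set` name follows that reading and is an assumption in this excerpt:

    FuncConfig config;
    config.set("aTrans", false).set("bTrans", true);  // chainable: set returns *this
    Error err;
    bool bTrans = config.get<bool>("bTrans", &err);   // err is set on a missing or
                                                      // mistyped key instead of throwing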
...@@ -77,7 +77,7 @@ protected: ...@@ -77,7 +77,7 @@ protected:
* in the BufferArgs lifetime. * in the BufferArgs lifetime.
*/ */
class BufferArgs { class BufferArgs {
public: public:
BufferArgs() {} BufferArgs() {}
~BufferArgs() { ~BufferArgs() {
...@@ -137,7 +137,7 @@ public: ...@@ -137,7 +137,7 @@ public:
void addArg(SparseMatrixArg& arg) { args_.push_back(&arg); } void addArg(SparseMatrixArg& arg) { args_.push_back(&arg); }
private: private:
std::vector<BufferArg*> args_; std::vector<BufferArg*> args_;
// The BufferArg object is constructed and freed by BufferArgs. // The BufferArg object is constructed and freed by BufferArgs.
std::vector<BufferArg*> _args_; std::vector<BufferArg*> _args_;
...@@ -163,7 +163,7 @@ private: ...@@ -163,7 +163,7 @@ private:
* If Function has more than one output, each output can have different modes. * If Function has more than one output, each output can have different modes.
*/ */
class FunctionBase { class FunctionBase {
public: public:
virtual ~FunctionBase() {} virtual ~FunctionBase() {}
virtual void init(const FuncConfig& config) {} virtual void init(const FuncConfig& config) {}
...@@ -192,7 +192,7 @@ public: ...@@ -192,7 +192,7 @@ public:
static ClassRegistrar<FunctionBase> funcRegistrar_; static ClassRegistrar<FunctionBase> funcRegistrar_;
protected: protected:
// numInputs_ and numOutputs_ represent the maximum // numInputs_ and numOutputs_ represent the maximum
// input and output supported by Function. // input and output supported by Function.
// Some functions are optimized for input and output, // Some functions are optimized for input and output,
......
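FunctionBase ties the pieces together: init reads hyper-parameters from a FuncConfig once, and calc consumes BufferArgs on every call. A schematic device-templated subclass in the same shape as the conv functions in this diff; the op itself is a placeholder, and BufferArgs::size() is assumed from its container role. The one-space access-specifier indent follows the style the surrounding hunks apply.

    template <DeviceType Device>
    class ScaleFunc : public FunctionBase {
     public:
      void init(const FuncConfig& config) override {
        scale_ = config.get<real>("scale");  // read hyper-parameters once
      }
      void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
        CHECK_EQ(inputs.size(), 1UL);        // validate arity before computing
        CHECK_EQ(outputs.size(), 1UL);
        // ... dispatch a CPU or GPU kernel depending on Device ...
      }

     private:
      real scale_;
    };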
...@@ -39,7 +39,7 @@ struct Allocator<DEVICE_TYPE_GPU> { ...@@ -39,7 +39,7 @@ struct Allocator<DEVICE_TYPE_GPU> {
// Copy argument1 to argument2 // Copy argument1 to argument2
template <DeviceType DType1, DeviceType DType2> template <DeviceType DType1, DeviceType DType2>
class CopyArgument { class CopyArgument {
public: public:
void operator()(const BufferArg& arg1, BufferArg& arg2) { void operator()(const BufferArg& arg1, BufferArg& arg2) {
CHECK_EQ(arg1.valueType(), arg2.valueType()); CHECK_EQ(arg1.valueType(), arg2.valueType());
CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements()); CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements());
...@@ -95,7 +95,7 @@ public: ...@@ -95,7 +95,7 @@ public:
*/ */
template <DeviceType DType1, DeviceType DType2> template <DeviceType DType1, DeviceType DType2>
class Compare2Function { class Compare2Function {
public: public:
typedef typename test::Allocator<DType1>::type Allocator1; typedef typename test::Allocator<DType1>::type Allocator1;
typedef typename test::Allocator<DType2>::type Allocator2; typedef typename test::Allocator<DType2>::type Allocator2;
typedef typename Tensor<real, DType1>::Vector Vector1; typedef typename Tensor<real, DType1>::Vector Vector1;
...@@ -305,7 +305,7 @@ public: ...@@ -305,7 +305,7 @@ public:
std::shared_ptr<FunctionBase> getFunction2() const { return function2_; } std::shared_ptr<FunctionBase> getFunction2() const { return function2_; }
protected: protected:
// only init the cpu argument; the gpu argument is copied from the cpu argument. // only init the cpu argument; the gpu argument is copied from the cpu argument.
void initArg(BufferArg& arg) { void initArg(BufferArg& arg) {
Vector1 vector(arg.shape().getElements(), (real*)arg.data()); Vector1 vector(arg.shape().getElements(), (real*)arg.data());
...@@ -381,7 +381,7 @@ protected: ...@@ -381,7 +381,7 @@ protected:
} }
} }
protected: protected:
std::shared_ptr<FunctionBase> function1_; std::shared_ptr<FunctionBase> function1_;
std::shared_ptr<FunctionBase> function2_; std::shared_ptr<FunctionBase> function2_;
std::vector<std::shared_ptr<Allocator1>> func1Memory_; std::vector<std::shared_ptr<Allocator1>> func1Memory_;
...@@ -400,7 +400,7 @@ protected: ...@@ -400,7 +400,7 @@ protected:
class CpuGpuFuncCompare class CpuGpuFuncCompare
: public Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> { : public Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> {
public: public:
CpuGpuFuncCompare(const std::string& name, const FuncConfig& config) CpuGpuFuncCompare(const std::string& name, const FuncConfig& config)
: Compare2Function(name + "-CPU", name + "-GPU", config) {} : Compare2Function(name + "-CPU", name + "-GPU", config) {}
......
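Compare2Function runs the same named Function on two device types and checks the outputs against each other; CpuGpuFuncCompare binds that to CPU versus GPU. A hedged test sketch: the BufferArg constructor and the addInputs/addOutputs/run helpers are assumptions about the harness inferred from the members above, not signatures shown in this diff.

    TEST(MulOp, CpuVsGpu) {
      CpuGpuFuncCompare test(
          "MulOp", FuncConfig().set("aTrans", false).set("bTrans", false));
      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{16, 32}));
      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{32, 8}));
      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{16, 8}));
      test.run();  // evaluates function1_ (CPU) and function2_ (GPU), then compares
    }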
...@@ -24,7 +24,7 @@ namespace paddle { ...@@ -24,7 +24,7 @@ namespace paddle {
*/ */
template <DeviceType Device> template <DeviceType Device>
class GemmConvFunction : public ConvFunctionBase { class GemmConvFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
...@@ -136,7 +136,7 @@ public: ...@@ -136,7 +136,7 @@ public:
*/ */
template <DeviceType Device> template <DeviceType Device>
class GemmConvMobileFunction : public ConvFunctionBase { class GemmConvMobileFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
...@@ -297,7 +297,7 @@ public: ...@@ -297,7 +297,7 @@ public:
*/ */
template <DeviceType Device> template <DeviceType Device>
class GemmConvGradInputFunction : public ConvFunctionBase { class GemmConvGradInputFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
...@@ -404,7 +404,7 @@ public: ...@@ -404,7 +404,7 @@ public:
*/ */
template <DeviceType Device> template <DeviceType Device>
class GemmConvGradFilterFunction : public ConvFunctionBase { class GemmConvGradFilterFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
......
...@@ -70,7 +70,7 @@ enum ColFormat { kCFO = 0, kOCF = 1 }; ...@@ -70,7 +70,7 @@ enum ColFormat { kCFO = 0, kOCF = 1 };
*/ */
template <ColFormat Format, DeviceType Device, class T> template <ColFormat Format, DeviceType Device, class T>
class Im2ColFunctor { class Im2ColFunctor {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
...@@ -85,7 +85,7 @@ public: ...@@ -85,7 +85,7 @@ public:
template <ColFormat Format, DeviceType Device, class T> template <ColFormat Format, DeviceType Device, class T>
class Col2ImFunctor { class Col2ImFunctor {
public: public:
void operator()(T* imData, void operator()(T* imData,
const TensorShape& imShape, const TensorShape& imShape,
const T* colData, const T* colData,
...@@ -100,7 +100,7 @@ public: ...@@ -100,7 +100,7 @@ public:
template <class T> template <class T>
class Im2ColMobileFunctor { class Im2ColMobileFunctor {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
......
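Im2ColFunctor and Col2ImFunctor exist so that convolution reduces to a single GEMM: unroll every receptive field into a column, then multiply by an [outChannels, C * kH * kW] filter matrix. A minimal im2col in the kCFO row ordering (channel, filter row, filter column), with stride 1 and no padding assumed:

    // colData must hold (C * kH * kW) * (outH * outW) floats; row r of colData
    // is the (c, fh, fw) filter tap gathered across every output position.
    void Im2ColNaive(const float* imData, int C, int inH, int inW,
                     int kH, int kW, float* colData) {
      const int outH = inH - kH + 1;
      const int outW = inW - kW + 1;
      for (int c = 0; c < C; ++c)
        for (int fh = 0; fh < kH; ++fh)
          for (int fw = 0; fw < kW; ++fw)
            for (int oh = 0; oh < outH; ++oh)
              for (int ow = 0; ow < outW; ++ow) {
                const int row = (c * kH + fh) * kW + fw;
                colData[row * outH * outW + oh * outW + ow] =
                    imData[(c * inH + oh + fh) * inW + (ow + fw)];
              }
    }

Col2Im is the scatter-add inverse of this gather, which is why it appears in the gradient paths of the Gemm conv functions above.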
...@@ -23,7 +23,7 @@ namespace paddle { ...@@ -23,7 +23,7 @@ namespace paddle {
*/ */
template <class T> template <class T>
class Im2ColFunctor<kCFO, DEVICE_TYPE_CPU, T> { class Im2ColFunctor<kCFO, DEVICE_TYPE_CPU, T> {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
...@@ -75,7 +75,7 @@ public: ...@@ -75,7 +75,7 @@ public:
*/ */
template <class T> template <class T>
class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, T> { class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, T> {
public: public:
void operator()(T* imData, void operator()(T* imData,
const TensorShape& imShape, const TensorShape& imShape,
const T* colData, const T* colData,
...@@ -130,7 +130,7 @@ template class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, double>; ...@@ -130,7 +130,7 @@ template class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, double>;
*/ */
template <class T> template <class T>
class Im2ColFunctor<kOCF, DEVICE_TYPE_CPU, T> { class Im2ColFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
...@@ -188,7 +188,7 @@ public: ...@@ -188,7 +188,7 @@ public:
*/ */
template <class T> template <class T>
class Col2ImFunctor<kOCF, DEVICE_TYPE_CPU, T> { class Col2ImFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public: public:
void operator()(T* imData, void operator()(T* imData,
const TensorShape& imShape, const TensorShape& imShape,
const T* colData, const T* colData,
......
...@@ -71,7 +71,7 @@ __global__ void im2col(const T* data_im, ...@@ -71,7 +71,7 @@ __global__ void im2col(const T* data_im,
*/ */
template <class T> template <class T>
class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, T> { class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, T> {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
...@@ -184,7 +184,7 @@ __global__ void col2im(size_t n, ...@@ -184,7 +184,7 @@ __global__ void col2im(size_t n,
*/ */
template <class T> template <class T>
class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, T> { class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, T> {
public: public:
void operator()(T* imData, void operator()(T* imData,
const TensorShape& imShape, const TensorShape& imShape,
const T* colData, const T* colData,
...@@ -292,7 +292,7 @@ __global__ void im2colOCF(const T* imData, ...@@ -292,7 +292,7 @@ __global__ void im2colOCF(const T* imData,
*/ */
template <class T> template <class T>
class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, T> { class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, T> {
public: public:
void operator()(const T* imData, void operator()(const T* imData,
const TensorShape& imShape, const TensorShape& imShape,
T* colData, T* colData,
...@@ -399,7 +399,7 @@ __global__ void col2imOCF(T* imData, ...@@ -399,7 +399,7 @@ __global__ void col2imOCF(T* imData,
*/ */
template <class T> template <class T>
class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, T> { class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, T> {
public: public:
void operator()(T* imData, void operator()(T* imData,
const TensorShape& imShape, const TensorShape& imShape,
const T* colData, const T* colData,
......
...@@ -240,7 +240,7 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out, ...@@ -240,7 +240,7 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out,
*/ */
template <DeviceType Device> template <DeviceType Device>
class MulFunc : public FunctionBase { class MulFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
aTrans_ = config.get<bool>("aTrans"); aTrans_ = config.get<bool>("aTrans");
bTrans_ = config.get<bool>("bTrans"); bTrans_ = config.get<bool>("bTrans");
...@@ -335,7 +335,7 @@ public: ...@@ -335,7 +335,7 @@ public:
} }
} }
private: private:
bool aTrans_; bool aTrans_;
bool bTrans_; bool bTrans_;
}; };
......
...@@ -24,7 +24,7 @@ namespace paddle { ...@@ -24,7 +24,7 @@ namespace paddle {
*/ */
template <class T> template <class T>
class NaiveConvFunctor { class NaiveConvFunctor {
public: public:
void operator()(const T* inputData, void operator()(const T* inputData,
size_t batchSize, size_t batchSize,
size_t inputChannels, size_t inputChannels,
...@@ -85,7 +85,7 @@ public: ...@@ -85,7 +85,7 @@ public:
template <DeviceType Device> template <DeviceType Device>
class NaiveConvFunction : public ConvFunctionBase { class NaiveConvFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
......
...@@ -132,7 +132,7 @@ static inline PadConf castToPadConf(const FuncConfig& conf) { ...@@ -132,7 +132,7 @@ static inline PadConf castToPadConf(const FuncConfig& conf) {
template <DeviceType Device> template <DeviceType Device>
class PadFunc : public FunctionBase { class PadFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -157,7 +157,7 @@ public: ...@@ -157,7 +157,7 @@ public:
pad_); pad_);
} }
private: private:
PadConf pad_; PadConf pad_;
}; };
...@@ -173,7 +173,7 @@ private: ...@@ -173,7 +173,7 @@ private:
template <DeviceType Device> template <DeviceType Device>
class PadGradFunc : public FunctionBase { class PadGradFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -201,7 +201,7 @@ public: ...@@ -201,7 +201,7 @@ public:
pad_); pad_);
} }
private: private:
PadConf pad_; PadConf pad_;
}; };
......
...@@ -129,7 +129,7 @@ void RowConvGrad<DEVICE_TYPE_CPU>(const CpuMatrix& outG, ...@@ -129,7 +129,7 @@ void RowConvGrad<DEVICE_TYPE_CPU>(const CpuMatrix& outG,
template <DeviceType Device> template <DeviceType Device>
class RowConvFunc : public FunctionBase { class RowConvFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override {} void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -176,7 +176,7 @@ public: ...@@ -176,7 +176,7 @@ public:
template <DeviceType Device> template <DeviceType Device>
class RowConvGradFunc : public FunctionBase { class RowConvGradFunc : public FunctionBase {
// TODO(qingqing): split into RowConvDataFunc and RowConvWeightFunc // TODO(qingqing): split into RowConvDataFunc and RowConvWeightFunc
public: public:
void init(const FuncConfig& config) override {} void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......
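Row convolution is the lookahead convolution used in sequence models: output frame t mixes input frames t through t + context - 1, with one weight column per offset and no mixing across feature dimensions. A minimal sketch over a single sequence; the names and the row-major [numFrames, dim] layout are assumptions:

    // in, out: [numFrames, dim]; w: [context, dim].
    void RowConvNaive(const float* in, int numFrames, int dim,
                      const float* w, int context, float* out) {
      for (int t = 0; t < numFrames; ++t)
        for (int d = 0; d < dim; ++d) {
          float sum = 0.f;
          for (int c = 0; c < context && t + c < numFrames; ++c)
            sum += in[(t + c) * dim + d] * w[c * dim + d];
          out[t * dim + d] = sum;
        }
    }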
...@@ -92,7 +92,7 @@ void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad, ...@@ -92,7 +92,7 @@ void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad,
*/ */
template <DeviceType Device> template <DeviceType Device>
class ScaleSubRegionFunc : public FunctionBase { class ScaleSubRegionFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override { conf_ = config; } void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -109,7 +109,7 @@ public: ...@@ -109,7 +109,7 @@ public:
conf_); conf_);
} }
private: private:
FuncConfig conf_; FuncConfig conf_;
}; };
...@@ -124,7 +124,7 @@ private: ...@@ -124,7 +124,7 @@ private:
template <DeviceType Device> template <DeviceType Device>
class ScaleSubRegionGradFunc : public FunctionBase { class ScaleSubRegionGradFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override { conf_ = config; } void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -141,7 +141,7 @@ public: ...@@ -141,7 +141,7 @@ public:
conf_); conf_);
} }
private: private:
FuncConfig conf_; FuncConfig conf_;
}; };
......
...@@ -75,7 +75,7 @@ void NHWC2NCHW<DEVICE_TYPE_CPU>(real* outputs, ...@@ -75,7 +75,7 @@ void NHWC2NCHW<DEVICE_TYPE_CPU>(real* outputs,
*/ */
template <DeviceType Device> template <DeviceType Device>
class NCHW2NHWCFunc : public FunctionBase { class NCHW2NHWCFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override {} void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
...@@ -108,7 +108,7 @@ public: ...@@ -108,7 +108,7 @@ public:
*/ */
template <DeviceType Device> template <DeviceType Device>
class NHWC2NCHWFunc : public FunctionBase { class NHWC2NCHWFunc : public FunctionBase {
public: public:
void init(const FuncConfig& config) override {} void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......
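Both switch functions are pure index permutations, out[n][h][w][c] = in[n][c][h][w] and its inverse, so a direct sketch of the NCHW-to-NHWC direction is enough to pin down the layout convention:

    void NCHW2NHWCNaive(const float* in, int N, int C, int H, int W, float* out) {
      for (int n = 0; n < N; ++n)
        for (int c = 0; c < C; ++c)
          for (int h = 0; h < H; ++h)
            for (int w = 0; w < W; ++w)
              out[((n * H + h) * W + w) * C + c] =
                  in[((n * C + c) * H + h) * W + w];
    }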
...@@ -22,7 +22,7 @@ namespace paddle { ...@@ -22,7 +22,7 @@ namespace paddle {
* TensorShape used to represent shape of normal tensor. * TensorShape used to represent shape of normal tensor.
*/ */
class TensorShape { class TensorShape {
public: public:
TensorShape() : ndims_(0), nelements_(0) { initDims(0); } TensorShape() : ndims_(0), nelements_(0) { initDims(0); }
TensorShape(size_t ndims) : ndims_(ndims), nelements_(1) { initDims(ndims); } TensorShape(size_t ndims) : ndims_(ndims), nelements_(1) { initDims(ndims); }
...@@ -80,7 +80,7 @@ public: ...@@ -80,7 +80,7 @@ public:
bool operator!=(const TensorShape& t) const { return !(*this == t); } bool operator!=(const TensorShape& t) const { return !(*this == t); }
private: private:
// compute number of elements // compute number of elements
void numElements() { void numElements() {
nelements_ = 1; nelements_ = 1;
......
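TensorShape is a small value type: a dimension count, per-dimension sizes, and a cached element count maintained by numElements(). A short hedged sketch; getElements matches the shape().getElements() calls elsewhere in this diff, while the values initDims seeds are not shown here:

    TensorShape shape(3);                     // ndims_ == 3, dims seeded by initDims
    size_t n = shape.getElements();           // product of all dims
    bool differ = (shape != TensorShape(3));  // operator!= derives from operator==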
...@@ -21,7 +21,7 @@ namespace paddle { ...@@ -21,7 +21,7 @@ namespace paddle {
template <DeviceType Device> template <DeviceType Device>
class NeonDepthwiseConvFunction : public ConvFunctionBase { class NeonDepthwiseConvFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
......
...@@ -21,7 +21,7 @@ namespace paddle { ...@@ -21,7 +21,7 @@ namespace paddle {
template <DeviceType Device> template <DeviceType Device>
class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase { class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
} }
......
...@@ -46,7 +46,7 @@ nnp_convolution_algorithm get_nnp_convolution_algorithm( ...@@ -46,7 +46,7 @@ nnp_convolution_algorithm get_nnp_convolution_algorithm(
template <DeviceType Device> template <DeviceType Device>
class NNPACKConvFunction : public ConvFunctionBase { class NNPACKConvFunction : public ConvFunctionBase {
public: public:
void init(const FuncConfig& config) override { void init(const FuncConfig& config) override {
ConvFunctionBase::init(config); ConvFunctionBase::init(config);
algorithm_ = get_nnp_convolution_algorithm(config.get<std::string>("algo")); algorithm_ = get_nnp_convolution_algorithm(config.get<std::string>("algo"));
...@@ -231,7 +231,7 @@ public: ...@@ -231,7 +231,7 @@ public:
} }
} }
private: private:
nnp_convolution_algorithm algorithm_; nnp_convolution_algorithm algorithm_;
nnp_convolution_transform_strategy transform_strategy_; nnp_convolution_transform_strategy transform_strategy_;
void* workspaceBuffer_; void* workspaceBuffer_;
......
...@@ -44,10 +44,10 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar; ...@@ -44,10 +44,10 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar;
*/ */
#define BEGIN_DEFINE_ACTIVATION(ACTIVATION_NAME) \ #define BEGIN_DEFINE_ACTIVATION(ACTIVATION_NAME) \
class ACTIVATION_CLASS_NAME(ACTIVATION_NAME) : public ActivationFunction { \ class ACTIVATION_CLASS_NAME(ACTIVATION_NAME) : public ActivationFunction { \
private: \ private: \
static const std::string name; \ static const std::string name; \
\ \
public: \ public: \
const std::string& getName() const { return name; } const std::string& getName() const { return name; }
/** /**
* @def END_DEFINE_ACTIVATION * @def END_DEFINE_ACTIVATION
...@@ -70,7 +70,7 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar; ...@@ -70,7 +70,7 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar;
* Do nothing when forward/backward. * Do nothing when forward/backward.
*/ */
class IdentityActivation : public ActivationFunction { class IdentityActivation : public ActivationFunction {
public: public:
static const std::string name; static const std::string name;
Error __must_check forward(Argument& act) { Error __must_check forward(Argument& act) {
(void)act; (void)act;
......
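BEGIN_DEFINE_ACTIVATION opens a subclass with a private static name and a public getName, leaving the macro user inside the public section to supply forward and backward; the matching END_DEFINE_ACTIVATION referenced above closes and registers it. Hedged usage for a hypothetical sigmoid activation; the kernel calls in the bodies are illustrative only:

    BEGIN_DEFINE_ACTIVATION(sigmoid)
    Error __must_check forward(Argument& act) {
      act.value->sigmoid(*act.value);           // illustrative in-place kernel
      return Error();
    }
    Error __must_check backward(Argument& act) {
      act.grad->sigmoidDerivative(*act.value);  // illustrative gradient kernel
      return Error();
    }
    END_DEFINE_ACTIVATION(sigmoid)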
...@@ -31,7 +31,7 @@ struct Argument; ...@@ -31,7 +31,7 @@ struct Argument;
* *
*/ */
class ActivationFunction { class ActivationFunction {
public: public:
static ActivationFunction* create(const std::string& type); static ActivationFunction* create(const std::string& type);
static std::vector<std::string> getAllRegisteredTypes(); static std::vector<std::string> getAllRegisteredTypes();
......
...@@ -35,10 +35,10 @@ static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar; ...@@ -35,10 +35,10 @@ static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar;
* @def END_MKLDNN_ACTIVATION * @def END_MKLDNN_ACTIVATION
*/ */
#define END_MKLDNN_ACTIVATION(ACT_TYPE) \ #define END_MKLDNN_ACTIVATION(ACT_TYPE) \
private: \ private: \
static const std::string name; \ static const std::string name; \
\ \
public: \ public: \
const std::string& getName() const { return name; } \ const std::string& getName() const { return name; } \
} \ } \
; \ ; \
...@@ -63,11 +63,11 @@ public: \ ...@@ -63,11 +63,11 @@ public: \
#define DEFINE_MKLDNN_ELTWISE_ACTIVATION( \ #define DEFINE_MKLDNN_ELTWISE_ACTIVATION( \
ACT_TYPE, BASE_CLASS, ALPHA, BWD_ALPHA) \ ACT_TYPE, BASE_CLASS, ALPHA, BWD_ALPHA) \
BEGIN_MKLDNN_ACTIVATION(ACT_TYPE, BASE_CLASS) \ BEGIN_MKLDNN_ACTIVATION(ACT_TYPE, BASE_CLASS) \
private: \ private: \
static const float alpha; \ static const float alpha; \
static const float bwdAlpha; \ static const float bwdAlpha; \
\ \
public: \ public: \
float getAlpha() const { return alpha; } \ float getAlpha() const { return alpha; } \
float getBwdAlpha() const { return bwdAlpha; } \ float getBwdAlpha() const { return bwdAlpha; } \
END_MKLDNN_ACTIVATION(ACT_TYPE) \ END_MKLDNN_ACTIVATION(ACT_TYPE) \
......
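Reading the two macros together, DEFINE_MKLDNN_ELTWISE_ACTIVATION produces a class carrying two static floats plus their getters, and END_MKLDNN_ACTIVATION appends the static name plus getName before closing it. A schematic expansion for a hypothetical relu; the class name and base follow the ACTIVATION_CLASS_NAME pattern and are assumptions, since BEGIN_MKLDNN_ACTIVATION is not shown in this hunk:

    class MKLDNNReluActivation : public MKLDNNEltwiseActivation {
     private:
      static const float alpha;       // from the ALPHA macro argument
      static const float bwdAlpha;    // from BWD_ALPHA
      static const std::string name;  // e.g. "mkldnn_relu"

     public:
      float getAlpha() const { return alpha; }
      float getBwdAlpha() const { return bwdAlpha; }
      const std::string& getName() const { return name; }
    };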
...@@ -27,7 +27,7 @@ namespace paddle { ...@@ -27,7 +27,7 @@ namespace paddle {
* including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax * including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax
*/ */
class MKLDNNActivation : public ActivationFunction { class MKLDNNActivation : public ActivationFunction {
protected: protected:
// input value element count // input value element count
size_t cnt_; size_t cnt_;
// should not merge resetBwd into resetFwd, // should not merge resetBwd into resetFwd,
...@@ -43,7 +43,7 @@ protected: ...@@ -43,7 +43,7 @@ protected:
std::vector<mkldnn::primitive> pipelineFwd_; std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_; std::vector<mkldnn::primitive> pipelineBwd_;
public: public:
MKLDNNActivation() : cnt_(0), needResetBwd_(true) {} MKLDNNActivation() : cnt_(0), needResetBwd_(true) {}
~MKLDNNActivation() {} ~MKLDNNActivation() {}
static ActivationFunction* create(const std::string& type); static ActivationFunction* create(const std::string& type);
...@@ -72,7 +72,7 @@ class MKLDNNEltwiseActivation : public MKLDNNActivation { ...@@ -72,7 +72,7 @@ class MKLDNNEltwiseActivation : public MKLDNNActivation {
typedef mkldnn::eltwise_backward eltwise_bwd; typedef mkldnn::eltwise_backward eltwise_bwd;
typedef mkldnn::algorithm algorithm; typedef mkldnn::algorithm algorithm;
protected: protected:
// save the forward primitive desc, which can be used in backward // save the forward primitive desc, which can be used in backward
std::shared_ptr<eltwise_fwd::primitive_desc> fwdPD_; std::shared_ptr<eltwise_fwd::primitive_desc> fwdPD_;
// eltwise_bwd needs src input value // eltwise_bwd needs src input value
...@@ -80,7 +80,7 @@ protected: ...@@ -80,7 +80,7 @@ protected:
// used for copying data // used for copying data
std::shared_ptr<mkldnn::reorder> copyInVal_; std::shared_ptr<mkldnn::reorder> copyInVal_;
public: public:
MKLDNNEltwiseActivation() {} MKLDNNEltwiseActivation() {}
~MKLDNNEltwiseActivation() {} ~MKLDNNEltwiseActivation() {}
virtual const std::string& getName() const = 0; virtual const std::string& getName() const = 0;
...@@ -102,12 +102,12 @@ public: ...@@ -102,12 +102,12 @@ public:
class MKLDNNSoftmaxActivation : public MKLDNNActivation { class MKLDNNSoftmaxActivation : public MKLDNNActivation {
typedef mkldnn::softmax_forward softmax_fwd; typedef mkldnn::softmax_forward softmax_fwd;
private: private:
// for backward // for backward
MatrixPtr sftMaxSum_; MatrixPtr sftMaxSum_;
MatrixPtr sftMaxDot_; MatrixPtr sftMaxDot_;
public: public:
MKLDNNSoftmaxActivation() {} MKLDNNSoftmaxActivation() {}
~MKLDNNSoftmaxActivation() {} ~MKLDNNSoftmaxActivation() {}
virtual const std::string& getName() const = 0; virtual const std::string& getName() const = 0;
......
...@@ -71,7 +71,7 @@ typedef std::shared_ptr<BufferBatch> BufferBatchPtr; ...@@ -71,7 +71,7 @@ typedef std::shared_ptr<BufferBatch> BufferBatchPtr;
* @brief Data for batch training a neural network * @brief Data for batch training a neural network
*/ */
class DataBatch { class DataBatch {
public: public:
DataBatch() : size_(0) { data_.clear(); } DataBatch() : size_(0) { data_.clear(); }
/** /**
* @brief Get batch size * @brief Get batch size
...@@ -181,7 +181,7 @@ public: ...@@ -181,7 +181,7 @@ public:
} }
} }
protected: protected:
/** /**
* @brief batch size * @brief batch size
*/ */
...@@ -194,7 +194,7 @@ protected: ...@@ -194,7 +194,7 @@ protected:
}; };
class BufferBatch { class BufferBatch {
public: public:
BufferBatch() { BufferBatch() {
hlStream_ = HPPL_STREAM_DEFAULT; hlStream_ = HPPL_STREAM_DEFAULT;
hlEvent_ = NULL; hlEvent_ = NULL;
...@@ -235,7 +235,7 @@ public: ...@@ -235,7 +235,7 @@ public:
void swap(BufferBatch* bufBatch); void swap(BufferBatch* bufBatch);
void clone(DataBatch* srcBatch, bool useGpu); void clone(DataBatch* srcBatch, bool useGpu);
protected: protected:
DataBatch* batchData_; DataBatch* batchData_;
hl_stream_t hlStream_; hl_stream_t hlStream_;
hl_event_t hlEvent_; hl_event_t hlEvent_;
...@@ -247,7 +247,7 @@ typedef std::shared_ptr<DataProvider> DataProviderPtr; ...@@ -247,7 +247,7 @@ typedef std::shared_ptr<DataProvider> DataProviderPtr;
typedef Queue<BufferBatch*> BufferBatchQueue; typedef Queue<BufferBatch*> BufferBatchQueue;
class DoubleBuffer { class DoubleBuffer {
public: public:
DoubleBuffer(DataProvider* dataPool, bool useGpu, int64_t batchSize = 0); DoubleBuffer(DataProvider* dataPool, bool useGpu, int64_t batchSize = 0);
virtual ~DoubleBuffer(); virtual ~DoubleBuffer();
void removeOneBatch(DataBatch* dataBatch); void removeOneBatch(DataBatch* dataBatch);
...@@ -267,7 +267,7 @@ public: ...@@ -267,7 +267,7 @@ public:
void setPending(bool pending) { pending_ = pending; } void setPending(bool pending) { pending_ = pending; }
protected: protected:
virtual void asyncLoadBatch(); virtual void asyncLoadBatch();
void insertOneBatch(DataBatch* batch); void insertOneBatch(DataBatch* batch);
...@@ -290,7 +290,7 @@ protected: ...@@ -290,7 +290,7 @@ protected:
* one is for input, one is for label. * one is for input, one is for label.
*/ */
class DataProvider { class DataProvider {
public: public:
static ClassRegistrar<DataProvider, DataConfig, ModelConfig, bool> registrar_; static ClassRegistrar<DataProvider, DataConfig, ModelConfig, bool> registrar_;
static DataProvider* create(const DataConfig& config, static DataProvider* create(const DataConfig& config,
const ModelConfig& modelConfig, const ModelConfig& modelConfig,
...@@ -359,7 +359,7 @@ public: ...@@ -359,7 +359,7 @@ public:
*/ */
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch) = 0; virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch) = 0;
protected: protected:
DataConfig config_; DataConfig config_;
bool skipShuffle_; bool skipShuffle_;
float usageRatio_; float usageRatio_;
...@@ -382,7 +382,7 @@ protected: ...@@ -382,7 +382,7 @@ protected:
* necessary configurations such as stream_names * necessary configurations such as stream_names
*/ */
class DummyDataProvider : public DataProvider { class DummyDataProvider : public DataProvider {
public: public:
DummyDataProvider(const DataConfig& config, bool useGpu) DummyDataProvider(const DataConfig& config, bool useGpu)
: DataProvider(config, useGpu) {} : DataProvider(config, useGpu) {}
virtual void shuffle() {} virtual void shuffle() {}
...@@ -399,7 +399,7 @@ public: ...@@ -399,7 +399,7 @@ public:
* Data provider for one input and one integer label. * Data provider for one input and one integer label.
*/ */
class SimpleDataProviderBase : public DataProvider { class SimpleDataProviderBase : public DataProvider {
protected: protected:
/// sample feature dimension /// sample feature dimension
int64_t sampleDim_; int64_t sampleDim_;
/// the number of samples /// the number of samples
...@@ -425,7 +425,7 @@ protected: ...@@ -425,7 +425,7 @@ protected:
RWLock lock_; RWLock lock_;
public: public:
SimpleDataProviderBase(const DataConfig& config, bool useGpu, bool withInfo); SimpleDataProviderBase(const DataConfig& config, bool useGpu, bool withInfo);
~SimpleDataProviderBase() {} ~SimpleDataProviderBase() {}
...@@ -440,7 +440,7 @@ public: ...@@ -440,7 +440,7 @@ public:
/// return the number of samples in the buffer /// return the number of samples in the buffer
int64_t fillBuffer(); int64_t fillBuffer();
protected: protected:
/** /**
* @brief Fill at most size samples into data and label. * @brief Fill at most size samples into data and label.
* *
...@@ -458,12 +458,12 @@ protected: ...@@ -458,12 +458,12 @@ protected:
}; };
class SimpleDataProvider : public SimpleDataProviderBase { class SimpleDataProvider : public SimpleDataProviderBase {
public: public:
SimpleDataProvider(const DataConfig& config, bool useGpu); SimpleDataProvider(const DataConfig& config, bool useGpu);
~SimpleDataProvider(); ~SimpleDataProvider();
virtual void reset(); virtual void reset();
protected: protected:
void loadData(const std::string& fileName); void loadData(const std::string& fileName);
void loadDataFile(const std::string& fileName); void loadDataFile(const std::string& fileName);
virtual int64_t fillBufferImp(real* data, virtual int64_t fillBufferImp(real* data,
...@@ -471,7 +471,7 @@ protected: ...@@ -471,7 +471,7 @@ protected:
int* info, int* info,
int64_t size); int64_t size);
protected: protected:
size_t currentSampleIndex_; size_t currentSampleIndex_;
std::vector<int> labels_; std::vector<int> labels_;
std::vector<real> data_; std::vector<real> data_;
......
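Every concrete provider above is consumed through the same loop: reset, pull batches until an empty one comes back, repeat per pass. A hedged sketch; getNextBatch as the public wrapper around getNextBatchInternal, and the feed step, are assumptions from this header:

    void runOnePass(DataProvider* provider, int64_t batchSize) {
      provider->reset();  // may also restart the DoubleBuffer loading thread
      DataBatch batch;
      while (provider->getNextBatch(batchSize, &batch) > 0) {
        // batch.getSize() samples are ready here; feed them to the network
      }
    }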
...@@ -20,7 +20,7 @@ namespace paddle { ...@@ -20,7 +20,7 @@ namespace paddle {
template <class T> template <class T>
class DataProviderGroup : public DataProvider { class DataProviderGroup : public DataProvider {
protected: protected:
typedef T ProviderType; typedef T ProviderType;
typedef std::shared_ptr<ProviderType> ProviderPtrType; typedef std::shared_ptr<ProviderType> ProviderPtrType;
ProviderPtrType provider_; ProviderPtrType provider_;
...@@ -29,7 +29,7 @@ protected: ...@@ -29,7 +29,7 @@ protected:
std::mutex lock_; std::mutex lock_;
std::unique_ptr<MultiThreadWorker<ProviderType>> loader_; std::unique_ptr<MultiThreadWorker<ProviderType>> loader_;
public: public:
DataProviderGroup(const DataConfig& config, bool useGpu); DataProviderGroup(const DataConfig& config, bool useGpu);
~DataProviderGroup() {} ~DataProviderGroup() {}
...@@ -38,7 +38,7 @@ public: ...@@ -38,7 +38,7 @@ public:
virtual int64_t getSize() { return -1; } virtual int64_t getSize() { return -1; }
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch); virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
private: private:
void startLoader(); void startLoader();
void stopLoader(); void stopLoader();
void forceStopLoader(); void forceStopLoader();
......
...@@ -19,10 +19,10 @@ limitations under the License. */ ...@@ -19,10 +19,10 @@ limitations under the License. */
namespace paddle { namespace paddle {
class MultiDataProvider : public DataProvider { class MultiDataProvider : public DataProvider {
protected: protected:
std::vector<std::unique_ptr<DataProvider>> subDataProviders_; std::vector<std::unique_ptr<DataProvider>> subDataProviders_;
public: public:
MultiDataProvider(const DataConfig& config, MultiDataProvider(const DataConfig& config,
const ModelConfig& modelConfig, const ModelConfig& modelConfig,
bool useGpu); bool useGpu);
...@@ -33,7 +33,7 @@ public: ...@@ -33,7 +33,7 @@ public:
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch); virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
bool isTestMode() const { return isTestMode_; } bool isTestMode() const { return isTestMode_; }
private: private:
int totalDataRatio_; int totalDataRatio_;
bool isTestMode_; bool isTestMode_;
}; };
......
...@@ -28,7 +28,7 @@ namespace paddle { ...@@ -28,7 +28,7 @@ namespace paddle {
* messages from/to i/ostream. * messages from/to i/ostream.
*/ */
class ProtoReader { class ProtoReader {
public: public:
explicit ProtoReader(std::istream* s, bool dataCompression = false) { explicit ProtoReader(std::istream* s, bool dataCompression = false) {
CHECK(s) << "istream pointer is nullptr"; CHECK(s) << "istream pointer is nullptr";
istreamInput_.reset(new google::protobuf::io::IstreamInputStream(s)); istreamInput_.reset(new google::protobuf::io::IstreamInputStream(s));
...@@ -109,7 +109,7 @@ public: ...@@ -109,7 +109,7 @@ public:
return true; return true;
} }
protected: protected:
std::unique_ptr<google::protobuf::io::ZeroCopyInputStream> istreamInput_; std::unique_ptr<google::protobuf::io::ZeroCopyInputStream> istreamInput_;
std::unique_ptr<google::protobuf::io::GzipInputStream> gzipInput_; std::unique_ptr<google::protobuf::io::GzipInputStream> gzipInput_;
std::unique_ptr<google::protobuf::io::CodedInputStream> codedInput_; std::unique_ptr<google::protobuf::io::CodedInputStream> codedInput_;
...@@ -144,7 +144,7 @@ protected: ...@@ -144,7 +144,7 @@ protected:
}; };
class ProtoWriter { class ProtoWriter {
public: public:
explicit ProtoWriter(std::ostream* s, bool dataCompression = false) { explicit ProtoWriter(std::ostream* s, bool dataCompression = false) {
CHECK(s) << "ostream pointer is nullptr"; CHECK(s) << "ostream pointer is nullptr";
ostreamOutput_.reset(new google::protobuf::io::OstreamOutputStream(s)); ostreamOutput_.reset(new google::protobuf::io::OstreamOutputStream(s));
...@@ -168,7 +168,7 @@ public: ...@@ -168,7 +168,7 @@ public:
return ret; return ret;
} }
protected: protected:
std::unique_ptr<google::protobuf::io::ZeroCopyOutputStream> ostreamOutput_; std::unique_ptr<google::protobuf::io::ZeroCopyOutputStream> ostreamOutput_;
std::unique_ptr<google::protobuf::io::GzipOutputStream> gzipOutput_; std::unique_ptr<google::protobuf::io::GzipOutputStream> gzipOutput_;
std::unique_ptr<google::protobuf::io::CodedOutputStream> codedOutput_; std::unique_ptr<google::protobuf::io::CodedOutputStream> codedOutput_;
......
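ProtoReader and ProtoWriter are symmetric: each wraps the stream in an optional gzip layer plus a coded stream, so messages written by one can be read back by the other. A hedged round-trip sketch; the write/read method names are assumptions in this excerpt, and scoping the writer so its buffers flush before reading is a precaution rather than a documented contract:

    std::stringstream ss;
    {
      ProtoWriter writer(&ss, /* dataCompression= */ true);
      CHECK(writer.write(msg));  // msg: some google::protobuf message instance
    }
    ProtoReader reader(&ss, /* dataCompression= */ true);
    MyProto parsed;              // MyProto: hypothetical message type
    CHECK(reader.read(&parsed));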
...@@ -23,7 +23,7 @@ limitations under the License. */ ...@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
class PyDataProvider : public DataProvider { class PyDataProvider : public DataProvider {
public: public:
PyDataProvider(const DataConfig& config, PyDataProvider(const DataConfig& config,
bool useGpu, bool useGpu,
bool loadDataAll = true); bool loadDataAll = true);
...@@ -40,7 +40,7 @@ public: ...@@ -40,7 +40,7 @@ public:
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch); virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
protected: protected:
struct ProtoSlot; struct ProtoSlot;
// return false if each sample is one sequence, i.e., independent // return false if each sample is one sequence, i.e., independent
// of other samples. // of other samples.
...@@ -73,7 +73,7 @@ protected: ...@@ -73,7 +73,7 @@ protected:
void resetSlots(); void resetSlots();
void loadData(const std::vector<std::string>& fileList); void loadData(const std::vector<std::string>& fileList);
protected: protected:
struct ProtoSlot { struct ProtoSlot {
SlotDef::SlotType type; SlotDef::SlotType type;
int dim; int dim;
......
...@@ -93,7 +93,7 @@ inline std::ostream& operator<<(std::ostream& os, const SlotHeader& header) { ...@@ -93,7 +93,7 @@ inline std::ostream& operator<<(std::ostream& os, const SlotHeader& header) {
* prepare step, fill data into argument during fill step. * prepare step, fill data into argument during fill step.
*/ */
class IFieldScanner { class IFieldScanner {
public: public:
DISABLE_COPY(IFieldScanner); DISABLE_COPY(IFieldScanner);
/** /**
* Ctor. * Ctor.
...@@ -146,7 +146,7 @@ public: ...@@ -146,7 +146,7 @@ public:
*/ */
static IFieldScanner* create(SlotHeader* header); static IFieldScanner* create(SlotHeader* header);
protected: protected:
SlotHeader* headerPtr_; SlotHeader* headerPtr_;
}; };
...@@ -154,7 +154,7 @@ protected: ...@@ -154,7 +154,7 @@ protected:
* Py Data Provider Cache Interface. * Py Data Provider Cache Interface.
*/ */
class IPyDataProviderCache { class IPyDataProviderCache {
public: public:
virtual ~IPyDataProviderCache() {} virtual ~IPyDataProviderCache() {}
/** /**
...@@ -193,7 +193,7 @@ public: ...@@ -193,7 +193,7 @@ public:
* data. And it supports cache strategies. * data. And it supports cache strategies.
*/ */
class PyDataProvider2 : public DataProvider { class PyDataProvider2 : public DataProvider {
public: public:
/** /**
* Ctor * Ctor
*/ */
...@@ -234,7 +234,7 @@ public: ...@@ -234,7 +234,7 @@ public:
*/ */
virtual ~PyDataProvider2() { resetImpl(false); } virtual ~PyDataProvider2() { resetImpl(false); }
private: private:
void createPyDataObj(const std::string& model, void createPyDataObj(const std::string& model,
const std::string& className, const std::string& className,
const std::string& fileListName, const std::string& fileListName,
...@@ -435,7 +435,7 @@ private: ...@@ -435,7 +435,7 @@ private:
exit_ = false; exit_ = false;
} }
private: private:
std::unique_ptr<std::thread> loadThread_; std::unique_ptr<std::thread> loadThread_;
std::atomic<bool> exit_; std::atomic<bool> exit_;
std::deque<PyObjectPtr> callingContexts_; std::deque<PyObjectPtr> callingContexts_;
...@@ -461,7 +461,7 @@ private: ...@@ -461,7 +461,7 @@ private:
static PyObjectPtr zeroTuple_; static PyObjectPtr zeroTuple_;
class PositionRandom { class PositionRandom {
public: public:
inline explicit PositionRandom(bool skipRand) inline explicit PositionRandom(bool skipRand)
: eng_(ThreadLocalRandomEngine::get()), skipRand_(skipRand) {} : eng_(ThreadLocalRandomEngine::get()), skipRand_(skipRand) {}
...@@ -476,14 +476,14 @@ private: ...@@ -476,14 +476,14 @@ private:
} }
} }
private: private:
std::default_random_engine& eng_; std::default_random_engine& eng_;
std::unique_ptr<std::uniform_int_distribution<size_t>> dist_; std::unique_ptr<std::uniform_int_distribution<size_t>> dist_;
bool skipRand_; bool skipRand_;
}; };
// DataProvider interface // DataProvider interface
public: public:
/** /**
* Resetting the PyDataProvider. May start reading thread here. * Resetting the PyDataProvider. May start reading thread here.
*/ */
...@@ -666,7 +666,7 @@ REGISTER_DATA_PROVIDER_EX(py2, PyDataProvider2); ...@@ -666,7 +666,7 @@ REGISTER_DATA_PROVIDER_EX(py2, PyDataProvider2);
* Scanner for dense slot. * Scanner for dense slot.
*/ */
class DenseScanner : public IFieldScanner { class DenseScanner : public IFieldScanner {
public: public:
explicit DenseScanner(SlotHeader* ptr) : IFieldScanner(ptr), height_(0) {} explicit DenseScanner(SlotHeader* ptr) : IFieldScanner(ptr), height_(0) {}
/** /**
...@@ -708,7 +708,7 @@ public: ...@@ -708,7 +708,7 @@ public:
++height_; ++height_;
} }
private: private:
size_t height_; size_t height_;
}; };
...@@ -716,7 +716,7 @@ private: ...@@ -716,7 +716,7 @@ private:
* Scanner for index slot * Scanner for index slot
*/ */
class IndexScanner : public IFieldScanner { class IndexScanner : public IFieldScanner {
public: public:
explicit IndexScanner(SlotHeader* ptr) : IFieldScanner(ptr), cnt_(0) {} explicit IndexScanner(SlotHeader* ptr) : IFieldScanner(ptr), cnt_(0) {}
/** /**
...@@ -740,12 +740,12 @@ public: ...@@ -740,12 +740,12 @@ public:
CHECK(ok) << "Cannot cast int " << py::repr(obj); CHECK(ok) << "Cannot cast int " << py::repr(obj);
} }
private: private:
size_t cnt_; size_t cnt_;
}; };
class SparseNonValueScanner : public IFieldScanner { class SparseNonValueScanner : public IFieldScanner {
public: public:
explicit SparseNonValueScanner(SlotHeader* ptr) explicit SparseNonValueScanner(SlotHeader* ptr)
: IFieldScanner(ptr), nnz_(0), height_(0) {} : IFieldScanner(ptr), nnz_(0), height_(0) {}
...@@ -790,7 +790,7 @@ public: ...@@ -790,7 +790,7 @@ public:
++height_; ++height_;
} }
protected: protected:
/** /**
* Set a single sparse index and value. * Set a single sparse index and value.
* @param [out] col sparse index * @param [out] col sparse index
...@@ -809,7 +809,7 @@ protected: ...@@ -809,7 +809,7 @@ protected:
}; };
class SparseValueScanner : public SparseNonValueScanner { class SparseValueScanner : public SparseNonValueScanner {
public: public:
explicit SparseValueScanner(SlotHeader* ptr) : SparseNonValueScanner(ptr) {} explicit SparseValueScanner(SlotHeader* ptr) : SparseNonValueScanner(ptr) {}
virtual void finishPrepare(Argument& argument) { virtual void finishPrepare(Argument& argument) {
...@@ -817,7 +817,7 @@ public: ...@@ -817,7 +817,7 @@ public:
argument.value, height_, headerPtr_->dim, nnz_, FLOAT_VALUE); argument.value, height_, headerPtr_->dim, nnz_, FLOAT_VALUE);
} }
protected: protected:
virtual void setData(int* col, real* dat, PyObject* obj) { virtual void setData(int* col, real* dat, PyObject* obj) {
py::SequenceHelper s(obj); py::SequenceHelper s(obj);
SparseNonValueScanner::setData(col, dat, s[0]); SparseNonValueScanner::setData(col, dat, s[0]);
...@@ -829,7 +829,7 @@ protected: ...@@ -829,7 +829,7 @@ protected:
* Sequence Scanner. Scanner for sequence or sub-sequence. * Sequence Scanner. Scanner for sequence or sub-sequence.
*/ */
class SequenceScanner : public IFieldScanner { class SequenceScanner : public IFieldScanner {
public: public:
/** /**
* Ctor * Ctor
* @param innerScanner inner scanner for each timestep or sub-sequence. * @param innerScanner inner scanner for each timestep or sub-sequence.
...@@ -902,7 +902,7 @@ public: ...@@ -902,7 +902,7 @@ public:
*/ */
virtual void finishFill(Argument& argument) { inner_->finishFill(argument); } virtual void finishFill(Argument& argument) { inner_->finishFill(argument); }
protected: protected:
size_t getSize(PyObject* obj) { size_t getSize(PyObject* obj) {
py::SequenceHelper s(obj); py::SequenceHelper s(obj);
auto sc = dynamic_cast<SequenceScanner*>(inner_.get()); auto sc = dynamic_cast<SequenceScanner*>(inner_.get());
...@@ -917,7 +917,7 @@ protected: ...@@ -917,7 +917,7 @@ protected:
} }
} }
private: private:
std::unique_ptr<IFieldScanner> inner_; std::unique_ptr<IFieldScanner> inner_;
size_t cnt_; size_t cnt_;
std::function<ICpuGpuVectorPtr&(Argument&)> getSeqStartPos_; std::function<ICpuGpuVectorPtr&(Argument&)> getSeqStartPos_;
...@@ -969,7 +969,7 @@ IFieldScanner* IFieldScanner::create(SlotHeader* header) { ...@@ -969,7 +969,7 @@ IFieldScanner* IFieldScanner::create(SlotHeader* header) {
* python every pass. * python every pass.
*/ */
class NoCacheStrategy : public IPyDataProviderCache { class NoCacheStrategy : public IPyDataProviderCache {
public: public:
virtual bool reset() { return true; } virtual bool reset() { return true; }
virtual void drop(std::deque<PyObjectPtr>* data) { data->clear(); } virtual void drop(std::deque<PyObjectPtr>* data) { data->clear(); }
...@@ -984,7 +984,7 @@ public: ...@@ -984,7 +984,7 @@ public:
* The remaining passes will load data from memory. * The remaining passes will load data from memory.
*/ */
class CacheOnePassInMemory : public IPyDataProviderCache { class CacheOnePassInMemory : public IPyDataProviderCache {
public: public:
CacheOnePassInMemory() CacheOnePassInMemory()
: objPool_(new std::deque<PyObjectPtr>()), : objPool_(new std::deque<PyObjectPtr>()),
droppedPool_(new std::deque<PyObjectPtr>()) {} droppedPool_(new std::deque<PyObjectPtr>()) {}
...@@ -1011,7 +1011,7 @@ public: ...@@ -1011,7 +1011,7 @@ public:
virtual std::deque<PyObjectPtr>* load() { return objPool_.get(); } virtual std::deque<PyObjectPtr>* load() { return objPool_.get(); }
private: private:
std::unique_ptr<std::deque<PyObjectPtr>> objPool_; std::unique_ptr<std::deque<PyObjectPtr>> objPool_;
std::unique_ptr<std::deque<PyObjectPtr>> droppedPool_; std::unique_ptr<std::deque<PyObjectPtr>> droppedPool_;
}; };
......
...@@ -22,7 +22,7 @@ namespace paddle { ...@@ -22,7 +22,7 @@ namespace paddle {
* calculate sequence-to-sequence edit distance * calculate sequence-to-sequence edit distance
*/ */
class CTCErrorEvaluator : public Evaluator { class CTCErrorEvaluator : public Evaluator {
private: private:
MatrixPtr outActivations_; MatrixPtr outActivations_;
int numTimes_, numClasses_, numSequences_, blank_; int numTimes_, numClasses_, numSequences_, blank_;
real deletions_, insertions_, substitutions_; real deletions_, insertions_, substitutions_;
...@@ -197,7 +197,7 @@ private: ...@@ -197,7 +197,7 @@ private:
(real)seqClassficationError_ / numSequences_; (real)seqClassficationError_ / numSequences_;
} }
public: public:
CTCErrorEvaluator() CTCErrorEvaluator()
: numTimes_(0), : numTimes_(0),
numClasses_(0), numClasses_(0),
......
...@@ -77,7 +77,7 @@ class ChunkEvaluator : public Evaluator { ...@@ -77,7 +77,7 @@ class ChunkEvaluator : public Evaluator {
std::set<int> excludedChunkTypes_; std::set<int> excludedChunkTypes_;
mutable std::unordered_map<std::string, real> values_; mutable std::unordered_map<std::string, real> values_;
public: public:
virtual void init(const EvaluatorConfig& config) { virtual void init(const EvaluatorConfig& config) {
Evaluator::init(config); Evaluator::init(config);
if (config.chunk_scheme() == "IOB") { if (config.chunk_scheme() == "IOB") {
...@@ -276,7 +276,7 @@ public: ...@@ -276,7 +276,7 @@ public:
return "chunk"; return "chunk";
} }
private: private:
void storeLocalValues() const { void storeLocalValues() const {
CHECK_GE(numOutputSegments_, 0); CHECK_GE(numOutputSegments_, 0);
CHECK_GE(numLabelSegments_, 0); CHECK_GE(numLabelSegments_, 0);
......
...@@ -28,7 +28,7 @@ namespace paddle { ...@@ -28,7 +28,7 @@ namespace paddle {
* The config file api is detection_map_evaluator. * The config file api is detection_map_evaluator.
*/ */
class DetectionMAPEvaluator : public Evaluator { class DetectionMAPEvaluator : public Evaluator {
public: public:
DetectionMAPEvaluator() DetectionMAPEvaluator()
: evaluateDifficult_(false), cpuOutput_(nullptr), cpuLabel_(nullptr) {} : evaluateDifficult_(false), cpuOutput_(nullptr), cpuLabel_(nullptr) {}
...@@ -132,7 +132,7 @@ public: ...@@ -132,7 +132,7 @@ public:
LOG(FATAL) << "Distribute detection evaluation not implemented."; LOG(FATAL) << "Distribute detection evaluation not implemented.";
} }
protected: protected:
void calcTFPos(const size_t batchSize, void calcTFPos(const size_t batchSize,
const vector<map<size_t, vector<NormalizedBBox>>>& allGTBBoxes, const vector<map<size_t, vector<NormalizedBBox>>>& allGTBBoxes,
const vector<map<size_t, vector<pair<real, NormalizedBBox>>>>& const vector<map<size_t, vector<pair<real, NormalizedBBox>>>>&
...@@ -287,7 +287,7 @@ protected: ...@@ -287,7 +287,7 @@ protected:
real getValueImpl() const { return calcMAP(); } real getValueImpl() const { return calcMAP(); }
private: private:
real overlapThreshold_; // overlap threshold when determining whether a detection matches real overlapThreshold_; // overlap threshold when determining whether a detection matches
bool evaluateDifficult_; // whether evaluate difficult ground truth bool evaluateDifficult_; // whether evaluate difficult ground truth
size_t backgroundId_; // class index of background size_t backgroundId_; // class index of background
......
...@@ -38,7 +38,7 @@ void Evaluator::eval(const NeuralNetwork& nn) { ...@@ -38,7 +38,7 @@ void Evaluator::eval(const NeuralNetwork& nn) {
* The config file api is classification_error_evaluator. * The config file api is classification_error_evaluator.
*/ */
class ClassificationErrorEvaluator : public Evaluator { class ClassificationErrorEvaluator : public Evaluator {
public: public:
/* /*
ClassificationErrorEvaluator() : totalScore2_(0) {} ClassificationErrorEvaluator() : totalScore2_(0) {}
...@@ -124,7 +124,7 @@ public: ...@@ -124,7 +124,7 @@ public:
} }
// Evaluator interface // Evaluator interface
protected: protected:
std::string getTypeImpl() const { return "classification_error"; } std::string getTypeImpl() const { return "classification_error"; }
}; };
...@@ -135,7 +135,7 @@ protected: ...@@ -135,7 +135,7 @@ protected:
*/ */
class SequenceClassificationErrorEvaluator class SequenceClassificationErrorEvaluator
: public ClassificationErrorEvaluator { : public ClassificationErrorEvaluator {
public: public:
virtual void updateSamplesNum(const std::vector<Argument>& arguments) { virtual void updateSamplesNum(const std::vector<Argument>& arguments) {
numSamples_ += arguments[0].getNumSequences(); numSamples_ += arguments[0].getNumSequences();
} }
...@@ -166,7 +166,7 @@ public: ...@@ -166,7 +166,7 @@ public:
} }
// Evaluator interface // Evaluator interface
protected: protected:
std::string getTypeImpl() const { return "seq_classification_error"; } std::string getTypeImpl() const { return "seq_classification_error"; }
}; };
REGISTER_EVALUATOR(seq_classification_error, REGISTER_EVALUATOR(seq_classification_error,
...@@ -178,7 +178,7 @@ REGISTER_EVALUATOR(seq_classification_error, ...@@ -178,7 +178,7 @@ REGISTER_EVALUATOR(seq_classification_error,
* The config file api is sum_evaluator. * The config file api is sum_evaluator.
*/ */
class SumEvaluator : public Evaluator { class SumEvaluator : public Evaluator {
public: public:
SumEvaluator() : cpuLabel_(nullptr), cpuWeight_(nullptr) {} SumEvaluator() : cpuLabel_(nullptr), cpuWeight_(nullptr) {}
virtual void updateSamplesNum(const std::vector<Argument>& arguments) { virtual void updateSamplesNum(const std::vector<Argument>& arguments) {
...@@ -255,12 +255,12 @@ public: ...@@ -255,12 +255,12 @@ public:
mergeResultsOfAllClients(client); mergeResultsOfAllClients(client);
} }
private: private:
IVectorPtr cpuLabel_; IVectorPtr cpuLabel_;
MatrixPtr cpuWeight_; MatrixPtr cpuWeight_;
// Evaluator interface // Evaluator interface
protected: protected:
std::string getTypeImpl() const { return "sum"; } std::string getTypeImpl() const { return "sum"; }
}; };
/** /**
...@@ -274,7 +274,7 @@ protected: ...@@ -274,7 +274,7 @@ protected:
* *
*/ */
class ColumnSumEvaluator : public Evaluator { class ColumnSumEvaluator : public Evaluator {
public: public:
explicit ColumnSumEvaluator(int32_t colIdx) explicit ColumnSumEvaluator(int32_t colIdx)
: colIdx_(colIdx), colNum_(0), sum_(nullptr) {} : colIdx_(colIdx), colNum_(0), sum_(nullptr) {}
...@@ -368,13 +368,13 @@ public: ...@@ -368,13 +368,13 @@ public:
client->reduce(&numSamples_, &numSamples_, 1, FLAGS_trainer_id, 0);
}
private:
int32_t colIdx_;
size_t colNum_;
MatrixPtr sum_; /* cpu matrix */
// Evaluator interface
protected:
std::string getTypeImpl() const {
if (colIdx_ == -1)
return "last-column-sum";
...@@ -1018,7 +1018,7 @@ static InitFunction __reg_type_auc_sum__([]() {
* The config file api is value_printer_evaluator.
*/
class ValuePrinter : public NotGetableEvaluator {
public:
virtual void eval(const NeuralNetwork& nn) {
for (const std::string& name : config_.input_layers()) {
nn.getLayer(name)->getOutput().printValueString(LOG(INFO),
...@@ -1038,7 +1038,7 @@ REGISTER_EVALUATOR(value_printer, ValuePrinter);
* The config file api is gradient_printer_evaluator.
*/
class GradientPrinter : public NotGetableEvaluator {
public:
virtual void eval(const NeuralNetwork& nn) {
for (const std::string& name : config_.input_layers()) {
const Argument& argu = nn.getLayer(name)->getOutput();
...@@ -1061,11 +1061,11 @@ REGISTER_EVALUATOR(gradient_printer, GradientPrinter);
* The config file api is maxid_printer_evaluator.
*/
class MaxIdPrinter : public NotGetableEvaluator {
private:
IVectorPtr maxIds_;
MatrixPtr maxValues_;
public:
MaxIdPrinter() {}
virtual void eval(const NeuralNetwork& nn) {
...@@ -1103,12 +1103,12 @@ REGISTER_EVALUATOR(max_id_printer, MaxIdPrinter);
* The config file api is maxframe_printer_evaluator.
*/
class MaxFramePrinter : public NotGetableEvaluator {
private:
IVectorPtr maxIds_;
MatrixPtr maxValues_;
MatrixPtr value_;
public:
MaxFramePrinter() {
value_ =
Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, false);
...@@ -1190,7 +1190,7 @@ REGISTER_EVALUATOR(max_frame_printer, MaxFramePrinter);
*
*/
class SequenceTextPrinter : public NotGetableEvaluator {
private:
/// dict_file, which contains a list of tokens
std::vector<std::string> dict_;
/// result_file, which is the output file
...@@ -1203,7 +1203,7 @@ private:
/// store the probability associated with each sequence
std::vector<MatrixPtr> cpuIn_;
public:
SequenceTextPrinter() {}
virtual void init(const EvaluatorConfig& config) {
...@@ -1334,7 +1334,7 @@ REGISTER_EVALUATOR(seq_text_printer, SequenceTextPrinter);
* The config file api is classification_error_printer_evaluator.
*/
class ClassificationErrorPrinter : public ClassificationErrorEvaluator {
public:
virtual void updateSamplesNum(const std::vector<Argument>& arguments) {}
virtual real evalImp(std::vector<Argument>& arguments) {
...
...@@ -40,7 +40,7 @@ class NeuralNetwork;
* has been by a trained model.
*/
class Evaluator {
public:
static Evaluator* create(const EvaluatorConfig& config);
Evaluator() : numSamples_(0), totalScore_(0) {}
...@@ -172,7 +172,7 @@ public:
return this->getTypeImpl();
}
protected:
/**
* @brief getValueImpl The simplest way to define getValue result. If this
* evaluator doesn't contain multiple fields, and do not throw any error, just
...@@ -191,7 +191,7 @@ protected:
*/
virtual std::string getTypeImpl() const { return "base"; }
protected:
EvaluatorConfig config_;
double numSamples_;
double totalScore_;
...@@ -204,7 +204,7 @@ protected:
*/
class NotGetableEvaluator : public Evaluator {
// Evaluator interface
public:
void getNames(std::vector<std::string>* names) {}
real getValue(const std::string& name, Error* err) const {
...@@ -219,7 +219,7 @@ public:
};
class DummyEvaluator : public Evaluator {
public:
DummyEvaluator() {}
virtual void init(const EvaluatorConfig&) {}
virtual void start() {}
...@@ -232,7 +232,7 @@ public:
virtual void printStats(std::ostream&) const {}
// Evaluator interface
protected:
std::string getTypeImpl() const;
};
/**
...@@ -251,7 +251,7 @@ protected:
*
*/
class AucEvaluator : public Evaluator {
public:
AucEvaluator(int32_t colIdx)
: colIdx_(colIdx),
realColumnIdx_(0),
...@@ -269,7 +269,7 @@ public:
virtual void distributeEval(ParameterClient2* client);
private:
static const uint32_t kBinNum_ = (1 << 24) - 1;
static const int kNegativeLabel_ = 0;
double statPos_[kBinNum_ + 1];
...@@ -292,7 +292,7 @@ private:
double calcAuc() const;
// Evaluator interface
protected:
real getValueImpl() const;
std::string getTypeImpl() const;
};
...@@ -305,7 +305,7 @@ protected:
* dense value.
*/
class RankAucEvaluator : public Evaluator {
public:
// evaluate ranking AUC
virtual void start();
...@@ -317,7 +317,7 @@ public:
mergeResultsOfAllClients(client);
}
private:
MatrixPtr output_;
MatrixPtr click_;
MatrixPtr pv_;
...@@ -329,7 +329,7 @@ private:
size_t size);
// Evaluator interface
protected:
std::string getTypeImpl() const;
};
...@@ -344,7 +344,7 @@ protected:
* The config file api is precision_recall_evaluator.
*/
class PrecisionRecallEvaluator : public Evaluator {
public:
// Evaluate precision, recall and F1 score
PrecisionRecallEvaluator()
: isMultiBinaryLabel_(false),
...@@ -379,7 +379,7 @@ public:
StatsInfo() : TP(0.0), TN(0.0), FP(0.0), FN(0.0) {}
};
private:
bool isMultiBinaryLabel_;
std::vector<StatsInfo> statsInfo_;
...@@ -444,7 +444,7 @@ private:
* The config file api is pnpair_evaluator.
*/
class PnpairEvaluator : public Evaluator {
public:
PnpairEvaluator()
: cpuOutput_(nullptr),
cpuLabel_(nullptr),
...@@ -491,7 +491,7 @@ public:
<< " calc total neg pair: " << pairArray_[1];
}
private:
static const uint32_t kPairArrayNum_ = 2;
double pairArray_[kPairArrayNum_];
MatrixPtr cpuOutput_;
...@@ -500,7 +500,7 @@ private:
MatrixPtr cpuWeight_;
// Evaluator interface
protected:
real getValueImpl() const {
return pairArray_[0] / ((pairArray_[1] <= 0) ? 1.0 : pairArray_[1]);
}
...
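The AucEvaluator above never sorts samples; it accumulates per-score-bin positive and negative counts (statPos_, statNeg_ over kBinNum_ bins) and derives the area in calcAuc(). A minimal standalone sketch of that histogram approach (illustrative only, not Paddle's actual code):

#include <cstddef>
#include <vector>

// Approximate AUC from per-bin positive/negative counts by sweeping the
// bins from the high-score end and summing ROC trapezoids.
double binnedAuc(const std::vector<double>& statPos,
                 const std::vector<double>& statNeg) {
  double totPos = 0.0, totNeg = 0.0, area = 0.0;
  for (size_t i = statPos.size(); i-- > 0;) {
    double newPos = totPos + statPos[i];
    double newNeg = totNeg + statNeg[i];
    area += (newNeg - totNeg) * (totPos + newPos) / 2.0;  // d(FP) * avg(TP)
    totPos = newPos;
    totNeg = newNeg;
  }
  return (totPos > 0.0 && totNeg > 0.0) ? area / (totPos * totNeg) : 0.0;
}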
...@@ -73,7 +73,7 @@ class GradientMachine;
typedef std::shared_ptr<GradientMachine> GradientMachinePtr;
class GradientMachine {
public:
enum CreateMode {
kNormal = 0,
kSgdSparseCpuTraining = 3,
...@@ -240,7 +240,7 @@ public:
*/
virtual void releaseOutput() {}
protected:
virtual void onLoadParameter() {}
std::vector<ParameterPtr> parameters_;
...
...@@ -19,14 +19,14 @@ limitations under the License. */
namespace paddle {
class IGradientMachineMode {
public:
virtual ~IGradientMachineMode() {}
public: // interfaces
/**
* @brief create current mode's gradient machine by model config.
* @param config model config
*/
virtual GradientMachine* create(const ModelConfig& config) = 0;
/**
...@@ -55,14 +55,14 @@ public: // interfaces
*/
virtual bool needTrainWholeDataInOneBatch() const = 0;
public: // static methods.
/**
* @brief register a custom gradient machine mode.
* @note For user to register a custom gradient machine mode, id should >=
* kCustom.
* @param mode mode id.
* @param ptr mode description object.
*/
static void regGradientMachineMode(
int32_t mode, std::unique_ptr<IGradientMachineMode>&& ptr) {
modes_.insert(std::make_pair(mode, std::move(ptr)));
...@@ -141,7 +141,7 @@ public: // static methods.
}
}
private:
static std::unordered_map<int32_t, std::unique_ptr<IGradientMachineMode>>
modes_;
};
...
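regGradientMachineMode() above is a classic id-keyed registry. Below is a minimal sketch of the pattern with a simplified stand-in base class; the real IGradientMachineMode has more pure-virtual hooks than the diff shows, so this is an assumed reduction:

#include <cstdint>
#include <memory>
#include <unordered_map>

class IModeBase {  // stand-in for IGradientMachineMode
 public:
  virtual ~IModeBase() {}
  virtual const char* name() const = 0;
};

class MyCustomMode : public IModeBase {  // hypothetical custom mode
 public:
  const char* name() const override { return "my-custom-mode"; }
};

static std::unordered_map<int32_t, std::unique_ptr<IModeBase>> gModes;

static void regMode(int32_t mode, std::unique_ptr<IModeBase>&& ptr) {
  gModes.insert(std::make_pair(mode, std::move(ptr)));
}

// Usage, mirroring the "id should >= kCustom" note above:
//   regMode(100, std::unique_ptr<IModeBase>(new MyCustomMode()));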
...@@ -166,7 +166,7 @@ struct GradBuffer {
* the merged gradient to parameter server.
*/
class MultiGradientMachine : public GradientMachine {
public:
enum TaskType {
TASK_FORWARD_BACKWARD = 0,
TASK_FORWARD = 1,
...@@ -213,7 +213,7 @@ public:
/// The gradients will be copied to each thread in the computing threads.
virtual void setOutputGrad(const std::vector<Argument>& args);
protected:
friend class TrainerThread;
std::vector<TrainerThreadPtr>& getAllThreads() { return threads_; }
...@@ -281,7 +281,7 @@ protected:
int paraMainThread(int pid) const { return paraMainThread_[pid]; }
protected:
virtual void forwardImp(const std::vector<Argument>& inArgs,
std::vector<Argument>* outArgs,
PassType passType,
...@@ -298,7 +298,7 @@ protected:
void allocGradBufs();
protected:
bool useGpu_;
bool hasNonstaticCpuParamters_;
...@@ -342,7 +342,7 @@ protected:
};
class TrainerThread {
public:
TrainerThread(const ModelConfig& config,
int threadId,
MultiGradientMachine* multiMachine);
...@@ -392,7 +392,7 @@ public:
/// Whether the thread has input data.
bool hasInputData() { return batchSize_ != 0; }
protected:
void mergeCpuGradients();
void mergeGradSparse(
...@@ -421,7 +421,7 @@ protected:
/// GradientMachine::backward
void doCallback(int pid);
protected:
MultiGradientMachine* multiMachine_;
ModelConfig config_;
/// whether the thread should stop
...
...@@ -122,7 +122,7 @@ void MultiNetwork::finish() {
}
class MultiCombinedEvaluator : public Evaluator {
public:
MultiCombinedEvaluator() {}
void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
evaluators_.emplace_back(std::move(evaluator));
...@@ -167,7 +167,7 @@ public:
}
}
protected:
std::vector<std::unique_ptr<Evaluator>> evaluators_;
};
...
...@@ -22,7 +22,7 @@ limitations under the License. */
namespace paddle {
class MultiNetwork : public NeuralNetwork {
public:
explicit MultiNetwork(std::string subModelName = "")
: NeuralNetwork(subModelName) {}
...@@ -58,7 +58,7 @@ public:
virtual void finish();
protected:
std::vector<std::unique_ptr<NeuralNetwork>> subNetworks_;
};
} // namespace paddle
...@@ -362,7 +362,7 @@ void NeuralNetwork::releaseOutput() {
#ifndef PADDLE_MOBILE_INFERENCE
class CombinedEvaluator : public Evaluator {
public:
void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
evaluators_.emplace_back(std::move(evaluator));
}
...@@ -400,11 +400,11 @@ public:
}
}
protected:
std::vector<std::unique_ptr<Evaluator>> evaluators_;
// Evaluator interface
public:
/**
* @brief getNames will return all inside evaluators' names.
* @param names [out]: return names.
...@@ -435,7 +435,7 @@ public:
});
}
private:
template <typename T>
T getMethodHelper(const std::string& name,
Error* err,
...@@ -454,7 +454,7 @@ private:
};
class SubnetEvaluator : public CombinedEvaluator {
public:
SubnetEvaluator(const std::string& layerName,
std::unique_ptr<Evaluator>&& evaluator)
: layerName_(layerName) {
...@@ -473,7 +473,7 @@ public:
<< " in submodel " << nn.getName();
}
protected:
std::string layerName_;
};
...
...@@ -56,7 +56,7 @@ void parameterInitNN(int paramId,
std::vector<ParameterPtr>* sharedParams);
class NeuralNetwork : public GradientMachine {
public:
virtual void init(const ModelConfig& config,
ParamInitCallback callback = nullptr,
const std::vector<ParameterType>& parameterTypes =
...@@ -144,7 +144,7 @@ public:
*/
void releaseOutput();
protected:
/**
* The constructor of NeuralNetwork.
* The sub networks can get parameters_ and parameterMap_
...
...@@ -32,7 +32,7 @@ enum TaskType {
* multiple threads in parallel.
*/
class ParallelNeuralNetwork : public NeuralNetwork {
public:
ParallelNeuralNetwork(std::string subModelName = "",
NeuralNetwork *rootNetwork = nullptr)
: NeuralNetwork(subModelName, rootNetwork) {}
...@@ -66,7 +66,7 @@ public:
// virtual void eval(Evaluator* evaluator);
protected:
bool useGpu_;
/// number of gpu devices
int numDevices_;
...@@ -74,7 +74,7 @@ protected:
};
class ParallelThread {
public:
ParallelThread(int threadId, int deviceId, bool useGpu);
~ParallelThread();
void jobEnqueue(LayerPtr layer, TaskType task);
...@@ -87,10 +87,10 @@ public:
}
void setForwardPassType(PassType passType) { passType_ = passType; }
protected:
void computeThread();
public:
struct Job {
LayerPtr layer_;
TaskType task_;
...@@ -98,7 +98,7 @@ public:
typedef Queue<Job> JobQueue;
JobQueue queue_;
protected:
/// from 0 to threads-1
int threadId_;
/// the GPU device Id which the computeThread_ used
...
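ParallelThread above drains Job items from a Queue<Job> inside computeThread(). A standalone sketch of that producer/consumer queue using only the standard library (Paddle's Queue<T> is its own type; this is an assumed simplification):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

struct Job {
  std::function<void()> work;  // stand-in for (layer_, task_)
};

class JobQueue {
 public:
  void enqueue(Job job) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      queue_.push(std::move(job));
    }
    cond_.notify_one();
  }
  Job dequeue() {  // blocks until a job arrives
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return !queue_.empty(); });
    Job job = std::move(queue_.front());
    queue_.pop();
    return job;
  }
 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  std::queue<Job> queue_;
};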
...@@ -96,7 +96,7 @@ static InitFunction __init__diy_prob_method(
std::numeric_limits<int>::max());
class BeamSearchControlCallbacks {
public:
RecurrentGradientMachine::BeamSearchCandidatesAdjustCallback
beamSearchCandidateAdjust;
RecurrentGradientMachine::NormOrDropNodeCallback normOrDropNode;
...@@ -115,7 +115,7 @@ public:
};
class BeamSearchStatisticsCallbacks {
public:
RecurrentGradientMachine::EachStepCallback onEachStepStarted;
RecurrentGradientMachine::EachStepCallback onEachStepStoped;
...@@ -148,11 +148,11 @@ RecurrentGradientMachine::RecurrentGradientMachine(
* so it should not be placed in the root network.
*/
class BootBiasLayer : public Layer {
protected:
std::unique_ptr<Weight> biases_;
IVectorPtr cpuIds_;
public:
explicit BootBiasLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...
...@@ -30,7 +30,7 @@ class BeamSearchControlCallbacks;
class BeamSearchStatisticsCallbacks;
class RecurrentGradientMachine : public NeuralNetwork {
public:
RecurrentGradientMachine(const std::string& subModelName,
NeuralNetwork* rootNetwork);
...@@ -290,7 +290,7 @@ public:
return this->finalPaths_;
}
protected:
std::vector<Argument::SeqInfo> commonSeqInfo_;
ICpuGpuVectorPtr sequenceStartPositions_;
void calcSequenceStartPositions();
...@@ -447,7 +447,7 @@ protected:
MatrixPtr cpuProb_;
IVectorPtr cpuEos_;
private:
/*
* @return beam size in beam search
*/
...
...@@ -33,10 +33,10 @@ namespace paddle {
* The config file api is addto_layer.
*/
class AddtoLayer : public Layer {
protected:
std::unique_ptr<Weight> biases_;
public:
explicit AddtoLayer(const LayerConfig& config) : Layer(config) {}
~AddtoLayer() {}
...
...@@ -26,11 +26,11 @@ namespace paddle {
* called to set one and only one real layer
*/
class AgentLayer : public Layer {
protected:
LayerPtr realLayer_;
int numSamples_;
public:
explicit AgentLayer(const LayerConfig& config) : Layer(config) {}
~AgentLayer() {}
...@@ -55,14 +55,14 @@ public:
* GatherAgentLayer collects a complete sequence.
*/
class GatherAgentLayer : public Layer {
protected:
std::vector<LayerPtr> realLayers_;
std::vector<IVectorPtr> idsVec_;
// we don't clear idsVec_ vector to avoid IVector alloc/free
IVectorPtr allIds_;
std::vector<int> idIndex_;
public:
explicit GatherAgentLayer(const LayerConfig& config) : Layer(config) {}
virtual ~GatherAgentLayer() {}
...@@ -95,7 +95,7 @@ public:
* if it is, the agent will select a few ids in real layer.
*/
class ScatterAgentLayer : public Layer {
protected:
LayerPtr realLayer_;
IVectorPtr ids_;
IVectorPtr cpuIds_;
...@@ -113,7 +113,7 @@ protected:
// true for setRealLayer, false for setRealLayerAndOutput
bool selectionMode_;
public:
explicit ScatterAgentLayer(const LayerConfig& config) : Layer(config) {}
virtual ~ScatterAgentLayer() {}
...
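GatherAgentLayer and ScatterAgentLayer move rows between layers through id vectors. A rough standalone sketch of the gather step, with plain vectors instead of Paddle's Matrix/IVector (dst must already have enough rows):

#include <cstddef>
#include <vector>

void gatherRows(const std::vector<std::vector<float>>& src,
                const std::vector<int>& ids,
                std::vector<std::vector<float>>* dst) {
  for (size_t i = 0; i < ids.size(); ++i) {
    (*dst)[ids[i]] = src[i];  // row i of a real layer lands at slot ids[i]
  }
}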
...@@ -37,7 +37,7 @@ namespace paddle {
* The config file api is pooling_layer.
*/
class AverageLayer : public SequencePoolLayer {
public:
enum AverageStrategy { kAverage = 0, kSum = 1, kAverageSquareRootN = 2 };
explicit AverageLayer(const LayerConfig& config)
: SequencePoolLayer(config) {}
...@@ -48,7 +48,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
int mode_;
};
} // namespace paddle
...@@ -40,7 +40,7 @@ namespace paddle {
*/
class BatchNormBaseLayer : public Layer {
public:
explicit BatchNormBaseLayer(const LayerConfig& config) : Layer(config) {}
~BatchNormBaseLayer() {}
...@@ -61,7 +61,7 @@ public:
*/
void calFeatureMapSize();
protected:
/// Batch normalization scale parameter, which is referred to as gamma in
/// the original paper.
std::unique_ptr<Weight> weight_;
...
...@@ -27,7 +27,7 @@ namespace paddle {
*/
class BatchNormalizationLayer : public BatchNormBaseLayer {
public:
explicit BatchNormalizationLayer(const LayerConfig& config)
: BatchNormBaseLayer(config), firstTest_(true) {}
...@@ -38,7 +38,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
/// Load pre-calculated mean and std.
void setMeanAndStd();
...
...@@ -26,13 +26,13 @@ namespace paddle {
* @note The config file api is bilinear_interp_layer.
*/
class BilinearInterpLayer : public Layer {
protected:
size_t outImgH_, outImgW_;
size_t inImgH_, inImgW_;
real ratioH_, ratioW_;
size_t numChannels_;
public:
explicit BilinearInterpLayer(const LayerConfig& config) : Layer(config) {}
virtual ~BilinearInterpLayer() {}
...
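BilinearInterpLayer's ratioH_/ratioW_ map each output pixel back into the input image, where the four neighboring input pixels are blended. A single-channel standalone sketch (illustrative; the ratio convention is an assumption, not taken from the layer):

#include <algorithm>
#include <vector>

std::vector<float> bilinearResize(const std::vector<float>& in, int inH,
                                  int inW, int outH, int outW) {
  std::vector<float> out(outH * outW);
  // align-corners style ratios, in the spirit of ratioH_/ratioW_ above
  float ratioH = outH > 1 ? float(inH - 1) / (outH - 1) : 0.f;
  float ratioW = outW > 1 ? float(inW - 1) / (outW - 1) : 0.f;
  for (int i = 0; i < outH; ++i) {
    float y = ratioH * i;
    int y0 = int(y), y1 = std::min(y0 + 1, inH - 1);
    float fy = y - y0;
    for (int j = 0; j < outW; ++j) {
      float x = ratioW * j;
      int x0 = int(x), x1 = std::min(x0 + 1, inW - 1);
      float fx = x - x0;
      // blend the four surrounding input pixels
      out[i * outW + j] =
          (1 - fy) * ((1 - fx) * in[y0 * inW + x0] + fx * in[y0 * inW + x1]) +
          fy * ((1 - fx) * in[y1 * inW + x0] + fx * in[y1 * inW + x1]);
    }
  }
  return out;
}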
...@@ -40,7 +40,7 @@ namespace paddle {
* The config file api is block_expand_layer.
*/
class BlockExpandLayer : public Layer {
protected:
/**
* @brief Calculate outputH_ and outputW_ and return block number which
* actually is time steps.
...@@ -53,7 +53,7 @@ protected:
TensorShape inputShape_;
TensorShape outputShape_;
public:
explicit BlockExpandLayer(const LayerConfig& config) : Layer(config) {}
~BlockExpandLayer() {}
...
...@@ -30,14 +30,14 @@ namespace paddle {
* See LinearChainCRF.h for the detail of the CRF formulation.
*/
class CRFDecodingLayer : public CRFLayer {
public:
explicit CRFDecodingLayer(const LayerConfig& config) : CRFLayer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
protected:
std::unique_ptr<LinearChainCRF> crf_;
};
...
...@@ -27,14 +27,14 @@ namespace paddle {
* See class LinearChainCRF for the detail of the CRF formulation.
*/
class CRFLayer : public Layer {
public:
explicit CRFLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
protected:
size_t numClasses_;
ParameterPtr parameter_;
std::vector<LinearChainCRF> crfs_;
...
...@@ -20,7 +20,7 @@ limitations under the License. */
namespace paddle {
class CTCLayer : public Layer {
public:
explicit CTCLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
...@@ -31,7 +31,7 @@ public:
const Argument& softmaxSeqs,
const Argument& labelSeqs);
protected:
size_t numClasses_;
bool normByTimes_;
std::vector<LinearChainCTC> ctcs_;
...
...@@ -24,11 +24,11 @@ namespace paddle {
*/
class ClipLayer : public Layer {
protected:
double min_;
double max_;
public:
explicit ClipLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...
...@@ -23,7 +23,7 @@ namespace paddle {
* each input as one row for the output of this layer and apply activation.
*/
class ConcatenateLayer : public Layer {
public:
explicit ConcatenateLayer(const LayerConfig& config) : Layer(config) {}
~ConcatenateLayer() {}
...@@ -97,7 +97,7 @@ void ConcatenateLayer::backward(const UpdateCallback& callback) {
* processed by a Projection.
*/
class ConcatenateLayer2 : public Layer {
public:
explicit ConcatenateLayer2(const LayerConfig& config) : Layer(config) {}
~ConcatenateLayer2() {}
...@@ -108,7 +108,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
std::vector<std::unique_ptr<Projection>> projections_;
std::vector<Argument> projOutput_;
std::vector<std::pair<size_t, size_t>> projCol_;
...
...@@ -42,7 +42,7 @@ namespace paddle {
* The config file api is context_projection.
*/
class ContextProjection : public Projection {
public:
/**
* Constructor. If context_start is zero and context_length is one, it will
* set trainable_padding false. trainable_padding is an optional argument
...@@ -63,7 +63,7 @@ public:
virtual bool init();
protected:
std::unique_ptr<Weight> weight_;
/// number of extra timesteps added at the beginning
size_t beginPad_;
...
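A context projection concatenates, for each timestep t, the input rows t+context_start through t+context_start+context_length-1, padding past the sequence ends (zeros here; the trainable_padding option above learns those pad rows instead). A standalone sketch for one sequence:

#include <algorithm>
#include <vector>

std::vector<std::vector<float>> contextProject(
    const std::vector<std::vector<float>>& in,  // seqLen rows of size dim
    int contextStart, int contextLength) {
  int seqLen = static_cast<int>(in.size());
  int dim = in.empty() ? 0 : static_cast<int>(in[0].size());
  std::vector<std::vector<float>> out(
      seqLen, std::vector<float>(contextLength * dim, 0.f));
  for (int t = 0; t < seqLen; ++t) {
    for (int c = 0; c < contextLength; ++c) {
      int src = t + contextStart + c;
      if (src < 0 || src >= seqLen) continue;  // zero padding at boundaries
      std::copy(in[src].begin(), in[src].end(), out[t].begin() + c * dim);
    }
  }
  return out;
}

For example, with context_start = -1 and context_length = 3, row t of the output is the concatenation [x(t-1), x(t), x(t+1)].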
...@@ -26,7 +26,7 @@ namespace paddle {
* calculate convolution operation.
*/
class Conv3DLayer : public ConvBaseLayer {
public:
explicit Conv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~Conv3DLayer() {}
...@@ -40,7 +40,7 @@ public:
void bpropWeights(int i);
size_t getSize();
protected:
// Figure out the dimensions for individual gemms.
IntV M_; /// numFilters_ / filter_group_;
IntV N_; /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_
...
...@@ -24,7 +24,7 @@ namespace paddle {
*/
class ConvBaseLayer : public Layer {
protected:
typedef std::vector<int> IntV;
/// True if it's deconv layer, false if it's convolution layer
...@@ -88,7 +88,7 @@ protected:
/// of output size.
bool caffeMode_;
public:
explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...
...@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvBaseOperator : public Operator {
public:
ConvBaseOperator(const OperatorConfig &config, bool useGpu);
/**
* Free workspace in device and destroy cudnn tensor descriptor.
...@@ -46,7 +46,7 @@ public:
hl_destroy_convolution_descriptor(convDesc_);
}
protected:
/**
* Get convolution parameters from layer config and
* initialize member variables.
...
...@@ -23,7 +23,7 @@ namespace paddle {
* @brief Base class for ConvProjection and ConvTransProjection.
*/
class ConvBaseProjection : public Projection {
public:
/**
* Constructor.
*/
...@@ -33,7 +33,7 @@ public:
~ConvBaseProjection();
protected:
void getConvParams();
void initCudnn();
...
...@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvOperator : public ConvBaseOperator {
public:
ConvOperator(const OperatorConfig &config, bool useGpu)
: ConvBaseOperator(config, useGpu) {}
/**
...
...@@ -23,7 +23,7 @@ namespace paddle {
* @brief Convolution projection does the same calculation as CudnnConvLayer.
*/
class ConvProjection : public ConvBaseProjection {
public:
/**
* Constructor.
*/
...
...@@ -42,7 +42,7 @@ namespace paddle {
*/
class ConvShiftLayer : public Layer {
public:
explicit ConvShiftLayer(const LayerConfig& config) : Layer(config) {}
~ConvShiftLayer() {}
...
...@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvTransOperator : public ConvBaseOperator {
public:
ConvTransOperator(const OperatorConfig &config, bool useGpu)
: ConvBaseOperator(config, useGpu) {}
/**
...
...@@ -23,7 +23,7 @@ namespace paddle {
* @brief Convolution projection does the same calculation as CudnnConvLayer.
*/
class ConvTransProjection : public ConvBaseProjection {
public:
/**
* Constructor.
*/
...
...@@ -36,7 +36,7 @@ namespace paddle {
* The config file api is linear_comb_layer.
*/
class ConvexCombinationLayer : public Layer {
protected:
/// A matrix pointer pointing to second input.
MatrixPtr tmpMtx0;
/// A matrix pointer pointing to first input.
...@@ -44,7 +44,7 @@ protected:
/// A matrix pointer pointing to output.
MatrixPtr tmpRow1;
public:
explicit ConvexCombinationLayer(const LayerConfig& config) : Layer(config) {}
~ConvexCombinationLayer() {}
...
...@@ -33,7 +33,7 @@ namespace paddle {
* The config file api is cos_sim.
*/
class CosSimLayer : public Layer {
public:
explicit CosSimLayer(const LayerConfig& config) : Layer(config) {}
~CosSimLayer() {}
...
...@@ -32,7 +32,7 @@ namespace paddle {
*/
class CosSimVecMatLayer : public Layer {
protected:
MatrixPtr tmpMtx0;
MatrixPtr tmpMtx1;
MatrixPtr tmpRow0;
...@@ -40,7 +40,7 @@ protected:
MatrixPtr tmpRow2;
MatrixPtr tmpRow3;
public:
explicit CosSimVecMatLayer(const LayerConfig& config) : Layer(config) {}
~CosSimVecMatLayer() {}
...
...@@ -716,7 +716,7 @@ void HuberTwoClassification::backwardImp(Matrix& output,
* \f]
*/
class SumCostLayer : public Layer {
public:
explicit SumCostLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...
...@@ -29,7 +29,7 @@ namespace paddle {
* handled by the base class.
*/
class CostLayer : public Layer {
public:
explicit CostLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...@@ -51,7 +51,7 @@ public:
Argument& label,
Matrix& outputGrad) = 0;
protected:
LayerPtr weightLayer_;
real coeff_;
};
...@@ -65,7 +65,7 @@ protected:
* \f]
*/
class MultiClassCrossEntropy : public CostLayer {
public:
explicit MultiClassCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
...@@ -95,7 +95,7 @@ public:
* In Proceedings of the ACL 2014 Conference.
*/
class MultiClassCrossEntropyWithSelfNorm : public CostLayer {
public:
explicit MultiClassCrossEntropyWithSelfNorm(const LayerConfig& config)
: CostLayer(config) {}
...@@ -108,7 +108,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
MatrixPtr sftMaxSum_;
MatrixPtr sumInv_;
};
...@@ -120,7 +120,7 @@ protected:
* \f]
*/
class SoftBinaryClassCrossEntropy : public CostLayer {
public:
explicit SoftBinaryClassCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
...@@ -133,7 +133,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
MatrixPtr targetPerDim_;
};
...@@ -145,7 +145,7 @@ protected:
* \f]
*/
class SumOfSquaresCostLayer : public CostLayer {
public:
explicit SumOfSquaresCostLayer(const LayerConfig& config)
: CostLayer(config) {}
...@@ -171,7 +171,7 @@ public:
* x = output - label
*/
class SmoothL1CostLayer : public CostLayer {
public:
explicit SmoothL1CostLayer(const LayerConfig& config) : CostLayer(config) {}
bool init(const LayerMap& layerMap,
...@@ -197,7 +197,7 @@ public:
* Rank using Gradient Descent.
*/
class RankingCost : public Layer {
public:
explicit RankingCost(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...@@ -225,7 +225,7 @@ public:
(void)outputGrad;
}
private:
double posPairCount_;
double negPairCount_;
MatrixPtr margin_;
...@@ -250,7 +250,7 @@ private:
* with Nonsmooth Cost Functions.
*/
class LambdaCost : public Layer {
public:
explicit LambdaCost(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
...@@ -270,7 +270,7 @@ public:
real* gradData,
int size);
private:
MatrixPtr marginGrad_;
int truncationSize_;
int maxSortSize_;
...@@ -287,10 +287,10 @@ private:
* \f]
*/
class MultiBinaryLabelCrossEntropy : public CostLayer {
protected:
MatrixPtr targetPerDim_;
public:
explicit MultiBinaryLabelCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
...@@ -308,7 +308,7 @@ public:
* A base layer for HuberRegressionLoss and HuberTwoClassification.
*/
class HuberCost : public CostLayer {
public:
std::vector<Argument> tmpCpuInput_;
explicit HuberCost(const LayerConfig& config) : CostLayer(config) {}
...@@ -331,7 +331,7 @@ public:
* Loss = delta * abs(y - f) - 0.5 * delta^2, otherwise
*/
class HuberRegressionLoss : public HuberCost {
public:
explicit HuberRegressionLoss(const LayerConfig& config) : HuberCost(config) {}
bool init(const LayerMap& layerMap,
...@@ -343,7 +343,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
real delta_;
};
...@@ -356,7 +356,7 @@ protected:
* Loss = 0, otherwise
*/
class HuberTwoClassification : public HuberCost {
public:
explicit HuberTwoClassification(const LayerConfig& config)
: HuberCost(config) {}
...
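The comment above gives only the linear branch of the Huber regression loss; the quadratic branch for |y - f| <= delta is the standard one and is assumed here, since the diff elides it. In scalar form:

#include <cmath>

// 0.5 * (y - f)^2                  if |y - f| <= delta
// delta * |y - f| - 0.5 * delta^2  otherwise
double huberLoss(double y, double f, double delta) {
  double a = std::fabs(y - f);
  return a <= delta ? 0.5 * a * a : delta * a - 0.5 * delta * delta;
}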
...@@ -28,7 +28,7 @@ namespace paddle {
* crop input as this shape conf
*/
class CropLayer : public Layer {
public:
explicit CropLayer(const LayerConfig& config) : Layer(config) {}
~CropLayer() {}
...@@ -38,7 +38,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
void setOutDims();
void setInDims();
...
...@@ -44,7 +44,7 @@ struct BeamExpansion {
typedef std::shared_ptr<BeamExpansion> BeamExpansionPtr;
class CostForOneSequence {
public:
CostForOneSequence()
: beamSize_(0), validExpansionCount_(0), goldAsExtraPath_(false) {}
void setData(const BeamExpansionPtr bPtr, size_t beamSize) {
...@@ -64,7 +64,7 @@ public:
real forward();
void backward();
private:
void calValidExpandStep();
void constructTotalExpansion();
size_t initLastExpansion();
...@@ -93,14 +93,14 @@ private:
};
class CrossEntropyOverBeam : public Layer {
public:
explicit CrossEntropyOverBeam(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
private:
void checkInputs();
void copyInputsToCpu();
void resizeOutput();
...
...@@ -30,7 +30,7 @@ namespace paddle {
*/
class CudnnBatchNormLayer : public BatchNormBaseLayer {
public:
explicit CudnnBatchNormLayer(const LayerConfig& config)
: BatchNormBaseLayer(config) {}
...@@ -46,7 +46,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
/// Epsilon value used in the batch normalization formula.
/// Same epsilon value should be used in forward and backward functions.
double eps_;
...
...@@ -31,14 +31,14 @@ namespace paddle {
* The config file api is img_conv_layer.
*/
class CudnnConvBaseLayer : public ConvBaseLayer {
protected:
std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
std::vector<std::unique_ptr<Projection>> projections_;
hl_tensor_descriptor biasDesc_;
hl_tensor_descriptor outputDesc_;
public:
explicit CudnnConvBaseLayer(const LayerConfig& config)
: ConvBaseLayer(config) {}
...
...@@ -26,7 +26,7 @@ namespace paddle {
*/
class CudnnPoolLayer : public PoolLayer {
protected:
int windowHeight, windowWidth;
int heightPadding, widthPadding, strideHeight, strideWidth;
int imageH_, imageW_, outputH_, outputW_;
...@@ -40,7 +40,7 @@ protected:
/// A description of a pooling operation.
hl_pooling_descriptor poolingDesc_;
public:
static bool typeCheck(const std::string& poolType,
hl_pooling_mode_t* mode = nullptr);
explicit CudnnPoolLayer(const LayerConfig& config);
...
...@@ -25,7 +25,7 @@ namespace paddle {
* The config file api is data_layer.
*/
class DataLayer : public Layer {
public:
explicit DataLayer(const LayerConfig& config) : Layer(config) {}
virtual void setData(const Argument& data) { data_ = data; }
...@@ -58,10 +58,10 @@ public:
}
}
private:
void copyDataToOutput(Argument& output);
protected:
Argument data_;
};
...
...@@ -37,7 +37,7 @@ namespace paddle {
*/
class DataNormLayer : public Layer {
public:
enum NormalizationStrategy { kZScore = 0, kMinMax = 1, kDecimalScaling = 2 };
explicit DataNormLayer(const LayerConfig& config) : Layer(config) {}
...@@ -50,7 +50,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
int mode_;
std::unique_ptr<Weight> weight_;
MatrixPtr min_;
...
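The three NormalizationStrategy values of DataNormLayer, written out in scalar form. A standalone sketch; the real layer applies precomputed per-feature statistics held in members like weight_ and min_:

#include <cmath>

double zScore(double x, double mean, double stddev) {
  return (x - mean) / stddev;  // kZScore
}
double minMaxNorm(double x, double min, double max) {
  return (x - min) / (max - min);  // kMinMax
}
double decimalScaling(double x, int digits) {
  return x / std::pow(10.0, digits);  // kDecimalScaling
}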
...@@ -27,7 +27,7 @@ namespace paddle {
* calculate deconvolution3D operation.
*/
class DeConv3DLayer : public ConvBaseLayer {
public:
explicit DeConv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~DeConv3DLayer() {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
...@@ -40,7 +40,7 @@ public:
void bpropWeights(int i);
size_t getSize();
protected:
// Figure out the dimensions for individual gemms.
IntV M_; /// numFilters_ / filter_group_;
IntV N_; /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_
...
...@@ -33,7 +33,7 @@ namespace paddle {
*/
class DetectionOutputLayer : public Layer {
public:
explicit DetectionOutputLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
...@@ -42,7 +42,7 @@ public:
void backward(const UpdateCallback& callback = nullptr) {}
protected:
inline LayerPtr getPriorBoxLayer() { return inputLayers_[0]; }
inline LayerPtr getLocInputLayer(size_t index) {
...@@ -53,7 +53,7 @@ protected:
return inputLayers_[1 + inputNum_ + index];
}
private:
size_t numClasses_; // number of classes
size_t inputNum_; // number of input layers
real nmsThreshold_;
...
(The remaining file diffs are collapsed.)