Unverified commit 8f7b020b authored by Wu Yi, committed by GitHub

fix develop build issue (#10978)

* fix develop build issue

* fix google style

* cpplint check only fluid
Parent 8075a11f
......@@ -19,7 +19,7 @@ BasedOnStyle: Google
IndentWidth: 2
TabWidth: 2
ContinuationIndentWidth: 4
AccessModifierOffset: -2 # The private/protected/public has no indent in class
AccessModifierOffset: -1 # The private/protected/public has no indent in class
Standard: Cpp11
AllowAllParametersOfDeclarationOnNextLine: true
BinPackParameters: false
......
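Editor's note: the single .clang-format change above (AccessModifierOffset from -2 to -1, with IndentWidth: 2) is what produces almost every other hunk in this commit: under Google style, access specifiers are now indented one column inside the class body instead of sitting flush with the class keyword. An illustrative snippet of the resulting layout, not taken from this commit:

    // AccessModifierOffset: -2  ->  "public:" at column 0
    // AccessModifierOffset: -1  ->  " public:" at column 1 (Google style)
    class Example {
     public:
      Example() = default;
      void Run() {}

     private:
      int value_ = 0;
    };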
......@@ -94,7 +94,7 @@ void UpdateCallback::apply(Parameter* p) {
}
class UpdateCallbackWrapper {
public:
public:
explicit UpdateCallbackWrapper(const UpdateCallback& callback)
: callback(const_cast<UpdateCallback&>(callback)) {}
......@@ -105,7 +105,7 @@ public:
delete p;
}
private:
private:
UpdateCallback& callback;
};
......
......@@ -59,9 +59,10 @@ class RangeError {};
/// Not support Error, such as access GPU memory directly, etc.
class UnsupportError : public std::runtime_error {
public:
UnsupportError() : std::runtime_error(" "){};
UnsupportError(const std::string& message) : std::runtime_error(message){};
public:
UnsupportError() : std::runtime_error(" ") {}
explicit UnsupportError(const std::string& message)
: std::runtime_error(message) {}
};
/// This type will map to python's list of float.
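Editor's note: besides the reindented access specifiers, the UnsupportError hunk above shows the typical cpplint fixes in this commit: the stray ';' after each constructor body is dropped and the single-argument constructor becomes explicit, so a std::string can no longer convert to an UnsupportError implicitly. A small self-contained sketch of what that prevents (the Report/main code is hypothetical, illustrative only):

    #include <stdexcept>
    #include <string>

    class UnsupportError : public std::runtime_error {
     public:
      UnsupportError() : std::runtime_error(" ") {}
      // explicit: callers must spell out the conversion; no trailing ';' after {}.
      explicit UnsupportError(const std::string& message)
          : std::runtime_error(message) {}
    };

    void Report(const UnsupportError& e) { (void)e; }

    int main() {
      std::string msg = "direct GPU memory access is not supported";
      // Report(msg);               // no longer compiles once the ctor is explicit
      Report(UnsupportError(msg));  // the conversion has to be written out
      return 0;
    }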
......@@ -105,7 +106,7 @@ class Matrix {
DISABLE_COPY(Matrix);
static Matrix* createByPaddleMatrixPtr(void* sharedPtr);
public:
public:
virtual ~Matrix();
/**
......@@ -231,7 +232,7 @@ public:
bool isGpu() const;
private:
private:
void* getSharedPtr() const;
MatrixPrivate* m;
......@@ -248,7 +249,7 @@ class Vector {
void* getSharedPtr();
public:
public:
~Vector();
/// Create Vector filled with zero.
......@@ -310,10 +311,10 @@ public:
/// __len__ in python
size_t getSize() const;
private:
private:
VectorPrivate* m;
private:
private:
friend class Parameter;
friend class ParameterOptimizer;
friend struct ParameterTraverseCallbackPrivate;
......@@ -325,7 +326,7 @@ class IVector {
DISABLE_COPY(IVector);
static IVector* createByPaddleVectorPtr(void* ptr);
public:
public:
/// Create IVector filled with zero
static IVector* createZero(size_t sz, bool useGpu = isUsingGpu());
......@@ -389,7 +390,7 @@ public:
/// This method will map to python __len__();
size_t getSize() const;
private:
private:
void* getSharedPtr() const;
friend class Arguments;
......@@ -400,11 +401,11 @@ struct ArgumentsPrivate;
/// The Arguments is actual a std::vector<paddle::Argument> in paddle.
class Arguments {
private:
private:
Arguments(); // Internal Create.
DISABLE_COPY(Arguments);
public:
public:
/**
* Create a arguments with size.
* Note that it can be zero.
......@@ -475,12 +476,12 @@ public:
float sum() const;
private:
private:
static Arguments* createByPaddleArgumentVector(void* ptr);
static Arguments* createByPaddleArgument(const void* ptr);
void* getInternalArgumentsPtr() const;
private:
private:
ArgumentsPrivate* m;
friend class Trainer;
friend class GradientMachine;
......@@ -507,7 +508,7 @@ class ParameterConfig {
static ParameterConfig* createParameterConfigFromParameterPtr(void* ptr);
void* getRawPtr();
public:
public:
~ParameterConfig();
/**
......@@ -515,10 +516,10 @@ public:
*/
std::string toProtoString() const;
private:
private:
ParameterConfigPrivate* m;
private:
private:
friend class Parameter;
friend class ParameterOptimizer;
friend struct ParameterTraverseCallbackPrivate;
......@@ -529,7 +530,7 @@ class OptimizationConfig {
DISABLE_COPY(OptimizationConfig);
OptimizationConfig();
public:
public:
static OptimizationConfig* createFromProtoString(const std::string& str);
~OptimizationConfig();
......@@ -538,7 +539,7 @@ public:
*/
std::string toProtoString();
private:
private:
OptimizationConfigPrivate* m;
friend class TrainerConfig;
......@@ -549,11 +550,11 @@ private:
struct ParameterPrivate;
class Parameter {
private:
private:
Parameter();
DISABLE_COPY(Parameter);
public:
public:
virtual ~Parameter();
/**
......@@ -580,11 +581,11 @@ public:
size_t getSize() const;
private:
private:
static Parameter* createFromRawPtr(void* ptr);
static Parameter* createFromSharedPtr(void* ptr);
private:
private:
ParameterPrivate* m;
friend class UpdateCallbackWrapper;
friend class GradientMachine;
......@@ -598,14 +599,14 @@ struct ModelConfigPrivate;
* It is used by GradientMachine.
*/
class ModelConfig {
private:
private:
ModelConfig();
DISABLE_COPY(ModelConfig);
public:
public:
virtual ~ModelConfig();
private:
private:
ModelConfigPrivate* m;
friend class TrainerConfig;
friend struct TrainerConfigPrivate;
......@@ -619,11 +620,11 @@ struct TrainerConfigPrivate;
* It is used by GradientMachine.
*/
class TrainerConfig {
private:
private:
TrainerConfig();
DISABLE_COPY(TrainerConfig);
public:
public:
virtual ~TrainerConfig();
static TrainerConfig* createFromTrainerConfigFile(
......@@ -634,7 +635,7 @@ public:
OptimizationConfig* getOptimizationConfig() const;
private:
private:
TrainerConfigPrivate* m;
friend class Trainer;
};
......@@ -654,7 +655,7 @@ private:
* @endcode
*/
class UpdateCallback {
public:
public:
virtual ~UpdateCallback();
virtual void apply(Parameter* p);
};
......@@ -664,14 +665,14 @@ class ParameterTraverseCallback {
DISABLE_COPY(ParameterTraverseCallback);
ParameterTraverseCallback();
public:
public:
~ParameterTraverseCallback();
void apply(const std::vector<Vector*>& vecs,
const ParameterConfig& config,
size_t sparseId);
private:
private:
ParameterTraverseCallbackPrivate* m;
friend class ParameterOptimizer;
};
......@@ -686,7 +687,7 @@ class ParameterOptimizer {
DISABLE_COPY(ParameterOptimizer);
ParameterOptimizer();
public:
public:
static ParameterOptimizer* create(OptimizationConfig* config);
~ParameterOptimizer();
......@@ -710,7 +711,7 @@ public:
ParameterTraverseCallback* needSpecialTraversal(
const ParameterConfig& config) const;
private:
private:
ParameterOptimizerPrivate* m;
};
......@@ -718,11 +719,11 @@ class SequenceGenerator;
class Evaluator;
struct GradientMachinePrivate;
class GradientMachine {
private:
private:
GradientMachine();
DISABLE_COPY(GradientMachine);
public:
public:
virtual ~GradientMachine();
/**
......@@ -817,7 +818,7 @@ public:
void eval(Evaluator* evaluator);
private:
private:
GradientMachinePrivate* m;
static GradientMachine* createFromPaddleModelPtr(
......@@ -833,10 +834,10 @@ private:
struct ParameterUpdaterPrivate;
class ParameterUpdater {
private:
private:
ParameterUpdater();
public:
public:
static ParameterUpdater* createLocalUpdater(OptimizationConfig* config);
static ParameterUpdater* createRemoteUpdater(OptimizationConfig* config,
int passCount,
......@@ -911,17 +912,17 @@ public:
*/
void catchUpWith();
private:
private:
ParameterUpdaterPrivate* m;
};
struct EvaluatorPrivate;
class Evaluator {
private:
private:
Evaluator();
DISABLE_COPY(Evaluator);
public:
public:
~Evaluator();
/**
......@@ -945,7 +946,7 @@ public:
double getValue(const std::string name) const;
private:
private:
EvaluatorPrivate* m;
friend class GradientMachine;
......@@ -953,13 +954,13 @@ private:
struct TrainerPrivate;
class Trainer {
private:
private:
TrainerPrivate* m;
Trainer();
Trainer(TrainerConfig* optConfig, GradientMachine* gm);
DISABLE_COPY(Trainer);
public:
public:
virtual ~Trainer();
/// Create A Trainer By TrainerConfig. using paddle command line.
......@@ -1002,7 +1003,7 @@ public:
/// the N-Best results generated from one input sequence.
class ISequenceResults {
public:
public:
virtual ~ISequenceResults();
/// Number of result.
......@@ -1026,7 +1027,7 @@ class SequenceGenerator {
DISABLE_COPY(SequenceGenerator);
SequenceGenerator();
public:
public:
virtual ~SequenceGenerator();
/**
......@@ -1044,10 +1045,10 @@ public:
void setMaxLength(size_t maxlength);
void setBeamSize(size_t beamSize);
private:
private:
static SequenceGenerator* createByGradientMachineSharedPtr(void* ptr);
friend class GradientMachine;
private:
private:
SequenceGeneratorPrivate* m;
};
......@@ -138,7 +138,7 @@ struct SequenceGeneratorPrivate {
maxLength(0UL),
feedback(__create_feedback__()) {}
private:
private:
static paddle::Argument __create_feedback__() {
paddle::Argument feedback;
feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu);
......@@ -157,7 +157,7 @@ SequenceGenerator::~SequenceGenerator() { delete m; }
class PathSequenceResults : public ISequenceResults {
// ISequenceResults interface
public:
public:
PathSequenceResults(const std::shared_ptr<std::vector<Path>>& path,
const std::shared_ptr<std::vector<std::string>>& dict)
: path_(path), dict_(dict) {}
......@@ -196,7 +196,7 @@ public:
}
}
private:
private:
std::shared_ptr<std::vector<Path>> path_;
std::shared_ptr<std::vector<std::string>> dict_;
};
......
......@@ -26,7 +26,7 @@ enum GradientMatchineCreateMode {
namespace paddle {
class MyNeuralNetwork : public NeuralNetwork {
public:
public:
MyNeuralNetwork(const std::string& name, NeuralNetwork* network)
: NeuralNetwork(name, network) {}
};
......
......@@ -50,7 +50,7 @@ struct PaddleTensor {
* TODO(Superjomn) Prepare another API for NLP-related usages.
*/
class PaddlePredictor {
public:
public:
struct Config;
PaddlePredictor() = default;
PaddlePredictor(const PaddlePredictor&) = delete;
......@@ -66,6 +66,7 @@ public:
// be thread-safe.
virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
virtual bool InitShared() { return false; }
// Destroy the Predictor.
virtual ~PaddlePredictor() {}
......
......@@ -28,7 +28,7 @@ namespace {
// Timer for timer
class Timer {
public:
public:
double start;
double startu;
void tic() {
......@@ -135,8 +135,8 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
VLOG(3) << "Predictor::clone";
std::unique_ptr<PaddlePredictorImpl> cls(new PaddlePredictorImpl(config_));
if (!cls->InitShared(this)) {
std::unique_ptr<PaddlePredictor> cls(new PaddlePredictorImpl(config_));
if (!cls->InitShared()) {
LOG(ERROR) << "fail to call InitShared";
return nullptr;
}
......@@ -144,7 +144,7 @@ std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
}
// TODO(panyx0718): Consider merge with Init()?
bool PaddlePredictorImpl::InitShared(PaddlePredictorImpl *cls) {
bool PaddlePredictorImpl::InitShared() {
VLOG(3) << "Predictor::init_shared";
// 1. Define place, executor, scope
if (this->config_.device >= 0) {
......
......@@ -41,7 +41,7 @@ struct VisConfig : public PaddlePredictor::Config {
* Do not use this, just a demo indicating how to customize a Predictor.
*/
class PaddlePredictorImpl : public PaddlePredictor {
public:
public:
explicit PaddlePredictorImpl(const VisConfig &config) : config_(config) {}
bool Init();
......@@ -53,8 +53,8 @@ public:
~PaddlePredictorImpl() override{};
private:
bool InitShared(PaddlePredictorImpl *cls);
private:
bool InitShared();
bool SetFeed(const std::vector<PaddleTensor> &input_datas,
std::vector<paddle::framework::LoDTensor> *feeds);
bool GetFetch(const std::vector<paddle::framework::LoDTensor> &fetchs,
......
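Editor's note: the actual develop-build fix is the InitShared() signature change traced through the hunks above: PaddlePredictor gains a virtual InitShared() with no parameters, PaddlePredictorImpl declares the matching no-argument InitShared(), and Clone() now builds a fresh impl from the same config and initializes it through that virtual instead of passing the source predictor in. A minimal sketch of this clone-and-init pattern, using hypothetical class names rather than the real Paddle API:

    #include <iostream>
    #include <memory>

    class Predictor {
     public:
      virtual ~Predictor() {}
      virtual std::unique_ptr<Predictor> Clone() = 0;
      // Default hook; concrete predictors override it to set up shared state.
      virtual bool InitShared() { return false; }
    };

    class PredictorImpl : public Predictor {
     public:
      explicit PredictorImpl(int config) : config_(config) {}

      std::unique_ptr<Predictor> Clone() override {
        std::unique_ptr<Predictor> cls(new PredictorImpl(config_));
        if (!cls->InitShared()) {  // no 'this' argument any more
          std::cerr << "fail to call InitShared\n";
          return nullptr;
        }
        return cls;
      }

      bool InitShared() override { return true; }  // wire up place/executor/scope here

     private:
      int config_;
    };

    int main() {
      PredictorImpl p(/*config=*/42);
      std::unique_ptr<Predictor> copy = p.Clone();
      std::cout << (copy ? "cloned" : "clone failed") << std::endl;
      return 0;
    }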
......@@ -31,7 +31,7 @@ struct DemoConfig : public PaddlePredictor::Config {
* Do not use this, just a demo indicating how to customize a Predictor.
*/
class DemoPredictor : public PaddlePredictor {
public:
public:
explicit DemoPredictor(const DemoConfig &config) {
LOG(INFO) << "I get other_config " << config.other_config;
}
......
......@@ -31,7 +31,7 @@ namespace hppl {
*/
template <class T>
class Active {
public:
public:
typedef T (*forward)(T);
typedef T (*backward)(T, T);
};
......
......@@ -23,128 +23,128 @@ namespace unary {
template <class T>
class add_scale {
private:
private:
const T p;
public:
public:
INLINE add_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a + p; }
};
template <class T>
class sub_scale {
private:
private:
const T p;
public:
public:
INLINE sub_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a - p; }
};
template <class T>
class mul_scale {
private:
private:
const T p;
public:
public:
INLINE mul_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a * p; }
};
template <class T>
class div_scale {
private:
private:
const T p;
public:
public:
INLINE div_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a / p; }
};
template <class T>
class neg {
public:
public:
INLINE T operator()(const T a) const { return -a; }
};
template <class T>
class exp_op {
public:
public:
INLINE T operator()(const T a) const { return std::exp(a); }
};
template <class T>
class log_op {
public:
public:
INLINE T operator()(const T a) const { return std::log(a); }
};
template <class T>
class sqrt_op {
public:
public:
INLINE T operator()(const T a) const { return std::sqrt(a); }
};
template <class T>
class square {
public:
public:
INLINE T operator()(const T a) const { return a * a; }
};
template <class T>
class reciprocal {
public:
public:
INLINE T operator()(const T a) const { return T(1) / a; }
};
template <class T>
class abs {
public:
public:
INLINE T operator()(const T a) const { return a > 0 ? a : -a; }
};
template <class T>
class sign {
public:
public:
INLINE T operator()(const T a) const { return (a > 0) - (a < 0); }
};
template <class T>
class min {
private:
private:
const T p;
public:
public:
INLINE min(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a > p ? p : a; }
};
template <class T>
class max {
private:
private:
const T p;
public:
public:
INLINE max(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a < p ? p : a; }
};
template <class T>
class pow_op {
private:
private:
const T p;
public:
public:
INLINE pow_op(const T s) : p(s) {}
INLINE T operator()(const T a) const { return std::pow(a, p); }
};
template <class T>
class constant {
private:
private:
const T p;
public:
public:
INLINE constant(const T s) : p(s) {}
INLINE T operator()(int i) const { return p; }
INLINE T operator()(int i, int j) const { return p; }
......@@ -152,80 +152,80 @@ public:
template <class T>
class cmp_eq {
private:
private:
const T p;
public:
public:
INLINE cmp_eq(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a == p; }
};
template <class T>
class cmp_ne {
private:
private:
const T p;
public:
public:
INLINE cmp_ne(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a != p; }
};
template <class T>
class cmp_le {
private:
private:
const T p;
public:
public:
INLINE cmp_le(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a <= p; }
};
template <class T>
class cmp_lt {
private:
private:
const T p;
public:
public:
INLINE cmp_lt(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a < p; }
};
template <class T>
class cmp_ge {
private:
private:
const T p;
public:
public:
INLINE cmp_ge(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a >= p; }
};
template <class T>
class cmp_gt {
private:
private:
const T p;
public:
public:
INLINE cmp_gt(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a > p; }
};
template <class T>
class and_op {
private:
private:
const T p;
public:
public:
INLINE and_op(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a && p; }
};
template <class T>
class or_op {
private:
private:
const T p;
public:
public:
INLINE or_op(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a || p; }
};
......@@ -235,96 +235,96 @@ public:
namespace binary {
template <class T>
class add {
public:
public:
INLINE T operator()(const T a, const T b) const { return a + b; }
};
template <class T>
class add_scale {
private:
private:
const T p1;
const T p2;
public:
public:
INLINE add_scale(const T s1, const T s2) : p1(s1), p2(s2) {}
INLINE T operator()(const T a, const T b) const { return p1 * a + p2 * b; }
};
template <class T>
class sub {
public:
public:
INLINE T operator()(const T a, const T b) const { return a - b; }
};
template <class T>
class mul {
public:
public:
INLINE T operator()(const T a, const T b) const { return a * b; }
};
template <class T>
class div {
public:
public:
INLINE T operator()(const T a, const T b) const { return a / b; }
};
template <class T>
class cmp_eq {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a == b; }
};
template <class T>
class cmp_ne {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a != b; }
};
template <class T>
class cmp_le {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a <= b; }
};
template <class T>
class cmp_lt {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a < b; }
};
template <class T>
class cmp_ge {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a >= b; }
};
template <class T>
class cmp_gt {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a > b; }
};
template <class T>
class and_op {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a && b; }
};
template <class T>
class or_op {
public:
public:
INLINE bool operator()(const T a, const T b) const { return a || b; }
};
template <class T>
class min {
public:
public:
INLINE T operator()(const T a, const T b) const { return a > b ? b : a; }
};
template <class T>
class max {
public:
public:
INLINE T operator()(const T a, const T b) const { return a < b ? b : a; }
};
......@@ -332,7 +332,7 @@ public:
#ifndef PADDLE_TYPE_DOUBLE
template <>
class add<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_add_ps(a, b);
}
......@@ -340,11 +340,11 @@ public:
template <>
class add_scale<__m128> {
private:
private:
const __m128 p1;
const __m128 p2;
public:
public:
INLINE add_scale(const __m128 s1, const __m128 s2) : p1(s1), p2(s2) {}
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_add_ps(_mm_mul_ps(p1, a), _mm_mul_ps(p2, b));
......@@ -353,7 +353,7 @@ public:
template <>
class sub<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_sub_ps(a, b);
}
......@@ -361,7 +361,7 @@ public:
template <>
class mul<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_mul_ps(a, b);
}
......@@ -369,7 +369,7 @@ public:
template <>
class div<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_div_ps(a, b);
}
......@@ -377,7 +377,7 @@ public:
template <>
class min<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_min_ps(a, b);
}
......@@ -385,7 +385,7 @@ public:
template <>
class max<__m128> {
public:
public:
INLINE __m128 operator()(const __m128 a, const __m128 b) const {
return _mm_max_ps(a, b);
}
......@@ -393,7 +393,7 @@ public:
#else
template <>
class add<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_add_pd(a, b);
}
......@@ -401,11 +401,11 @@ public:
template <>
class add_scale<__m128d> {
private:
private:
const __m128d p1;
const __m128d p2;
public:
public:
INLINE add_scale(const __m128d s1, const __m128d s2) : p1(s1), p2(s2) {}
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_add_pd(_mm_mul_pd(p1, a), _mm_mul_pd(p2, b));
......@@ -414,7 +414,7 @@ public:
template <>
class sub<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_sub_pd(a, b);
}
......@@ -422,7 +422,7 @@ public:
template <>
class mul<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_mul_pd(a, b);
}
......@@ -430,7 +430,7 @@ public:
template <>
class div<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_div_pd(a, b);
}
......@@ -438,7 +438,7 @@ public:
template <>
class min<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_min_pd(a, b);
}
......@@ -446,7 +446,7 @@ public:
template <>
class max<__m128d> {
public:
public:
INLINE __m128d operator()(const __m128d a, const __m128d b) const {
return _mm_max_pd(a, b);
}
......@@ -458,7 +458,7 @@ public:
#ifndef PADDLE_TYPE_DOUBLE
template <>
class add<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vaddq_f32(a, b);
......@@ -467,11 +467,11 @@ public:
template <>
class add_scale<float32x4_t> {
private:
private:
const float32x4_t p1;
const float32x4_t p2;
public:
public:
INLINE add_scale(const float32x4_t s1, const float32x4_t s2)
: p1(s1), p2(s2) {}
INLINE float32x4_t operator()(const float32x4_t a,
......@@ -482,7 +482,7 @@ public:
template <>
class sub<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vsubq_f32(a, b);
......@@ -491,7 +491,7 @@ public:
template <>
class mul<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vmulq_f32(a, b);
......@@ -500,7 +500,7 @@ public:
template <>
class div<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
float32x4_t tmp = vrecpeq_f32(b);
......@@ -510,7 +510,7 @@ public:
template <>
class min<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vminq_f32(a, b);
......@@ -519,7 +519,7 @@ public:
template <>
class max<float32x4_t> {
public:
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vmaxq_f32(a, b);
......
......@@ -30,7 +30,7 @@ bool hl_lstm_sequence_parallel(int frameSize) {
}
class frameValue {
public:
public:
real *value_;
__device__ frameValue(real *value) : value_(value) {}
template <int reversed, int frameSize>
......
......@@ -33,7 +33,7 @@ namespace paddle {
* \param outputs[0] Image data of NCHW format.
*/
class BlockExpandFunction : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
// function arguments
strides_ = config.get<std::vector<size_t>>("strides");
......@@ -81,7 +81,7 @@ public:
(size_t)blockW()});
}
protected:
protected:
std::vector<size_t> strides_;
std::vector<size_t> paddings_;
std::vector<size_t> blocks_;
......@@ -101,7 +101,7 @@ protected:
template <DeviceType Device>
class BlockExpandForward : public BlockExpandFunction {
public:
public:
void init(const FuncConfig& config) override {
BlockExpandFunction::init(config);
}
......@@ -149,7 +149,7 @@ public:
template <DeviceType Device>
class BlockExpandBackward : public BlockExpandFunction {
public:
public:
void init(const FuncConfig& config) override {
BlockExpandFunction::init(config);
}
......
......@@ -63,12 +63,12 @@ enum ArgType {
ADD_TO = 2,
};
class BufferArg {
public:
public:
void setArgType(ArgType argType) { argType_ = argType; }
ArgType getArgType() const { return argType_; }
public:
public:
BufferArg(ValueType valueType,
const TensorShape& shape,
ArgType argType = UNSPECIFIED)
......@@ -169,7 +169,7 @@ public:
const SequenceArg& sequence() const;
const SparseMatrixArg& sparse() const;
protected:
protected:
void* buf_;
ValueType valueType_;
TensorShape shape_;
......@@ -185,7 +185,7 @@ protected:
// valueType_ = int32
// if a < b then value_.buf_[a] < value_.buf_[b]
class SequenceIdArg : public BufferArg {
public:
public:
SequenceIdArg(const TensorShape& shape, ArgType argType = UNSPECIFIED)
: BufferArg(VALUE_TYPE_INT32, shape, argType) {
bufferType_ = TENSOR_SEQUENCE_ID;
......@@ -212,7 +212,7 @@ public:
size_t numSeqs() const { return numSeqs_; }
private:
private:
size_t numSeqs_;
};
......@@ -222,7 +222,7 @@ private:
// SequenceArg can be used to represent sequences that contain multiple
// unequal lengths.
class SequenceArg : public BufferArg {
public:
public:
SequenceArg(ValueType valueType,
const TensorShape& shape,
ArgType argType = UNSPECIFIED)
......@@ -255,7 +255,7 @@ public:
SequenceIdArg& getSequenceId() { return startPositions_; }
const SequenceIdArg& getSequenceId() const { return startPositions_; }
private:
private:
SequenceIdArg startPositions_;
};
......@@ -263,7 +263,7 @@ private:
// valueType_ == float or double
// shape_.ndims() == 2
class SparseMatrixArg : public BufferArg {
public:
public:
SparseMatrixArg(void* buf,
ValueType valueType,
const TensorShape& shape,
......@@ -353,7 +353,7 @@ public:
SparseDataType dataType() const { return type_; }
private:
private:
BufferArg row_;
BufferArg col_;
size_t nnz_;
......
......@@ -100,7 +100,7 @@ void ContextProjectionForward<DEVICE_TYPE_CPU>(CpuMatrix& out_mat,
*/
template <DeviceType Device>
class ContextProjectionForwardFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
context_length_ = config.get<size_t>("context_length");
context_start_ = config.get<int>("context_start");
......@@ -146,7 +146,7 @@ public:
begin_pad_);
}
private:
private:
size_t context_length_;
int context_start_;
size_t begin_pad_;
......@@ -223,7 +223,7 @@ void ContextProjectionBackward<DEVICE_TYPE_CPU>(const CpuMatrix& out_grad_mat,
*/
template <DeviceType Device>
class ContextProjectionBackwardFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
context_length_ = config.get<size_t>("context_length");
context_start_ = config.get<int>("context_start");
......@@ -278,7 +278,7 @@ public:
total_pad_);
}
private:
private:
size_t context_length_;
int context_start_;
size_t begin_pad_;
......@@ -299,7 +299,7 @@ private:
*/
template <DeviceType Device>
class ContextProjectionBackwardDataFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
context_length_ = config.get<size_t>("context_length");
context_start_ = config.get<int>("context_start");
......@@ -331,7 +331,7 @@ public:
out_grad_mat, in_grad_mat, seq_vec, context_length_, context_start_);
}
private:
private:
size_t context_length_;
int context_start_;
};
......@@ -348,7 +348,7 @@ private:
*/
template <DeviceType Device>
class ContextProjectionBackwardWeightFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
context_length_ = config.get<size_t>("context_length");
context_start_ = config.get<int>("context_start");
......@@ -382,7 +382,7 @@ public:
begin_pad_);
}
private:
private:
size_t context_length_;
int context_start_;
size_t begin_pad_;
......
......@@ -56,7 +56,7 @@ namespace paddle {
* H and W is height and width of filter.
*/
class ConvFunctionBase : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
// function arguments
strides_ = config.get<std::vector<size_t>>("strides");
......@@ -101,7 +101,7 @@ public:
}
}
protected:
protected:
size_t getFilterHeight(const TensorShape& filter) const {
return filter[filter.ndims() - 2];
}
......
......@@ -97,7 +97,7 @@ class CosSimForwardFunc : public FunctionBase {
CosSimForward<Device>(out_mat, in1_mat, in2_mat, scale_);
}
private:
private:
real scale_;
};
......@@ -227,7 +227,7 @@ class CosSimBackwardFunc : public FunctionBase {
out_grad, out_val, in1_val, in2_val, in1_grad, in2_grad, scale_);
}
private:
private:
real scale_;
};
......
......@@ -112,7 +112,7 @@ void CropGrad<DEVICE_TYPE_CPU>(const real* inGrad,
*/
template <DeviceType Device>
class CropFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -130,7 +130,7 @@ public:
conf_);
}
private:
private:
FuncConfig conf_;
};
......@@ -145,7 +145,7 @@ private:
template <DeviceType Device>
class CropGradFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -163,7 +163,7 @@ public:
conf_);
}
private:
private:
FuncConfig conf_;
};
......
......@@ -160,7 +160,7 @@ void CrossMapNormalGrad<DEVICE_TYPE_CPU>(real* inputsGrad,
*/
template <DeviceType Device>
class CrossMapNormalFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
// function arguments
size_ = config.get<size_t>("size");
......@@ -220,7 +220,7 @@ public:
return ops;
}
private:
private:
size_t size_;
real scale_;
real pow_;
......@@ -260,7 +260,7 @@ private:
*/
template <DeviceType Device>
class CrossMapNormalGradFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
// function arguments
size_ = config.get<size_t>("size");
......@@ -328,7 +328,7 @@ public:
return ops;
}
private:
private:
size_t size_;
real scale_;
real pow_;
......
......@@ -19,7 +19,7 @@ namespace paddle {
template <class T>
class DepthwiseConvFunctor<DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(const T* inputData,
const T* filterData,
int batchSize,
......@@ -43,7 +43,7 @@ public:
template <class T>
class DepthwiseConvGradInputFunctor<DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(const T* outputGrad,
const T* filterData,
int batchSize,
......@@ -66,7 +66,7 @@ public:
template <class T>
class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(const T* outputGrad,
const T* inputData,
int batchSize,
......@@ -93,7 +93,7 @@ public:
*/
template <DeviceType Device>
class DepthwiseConvFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......@@ -156,7 +156,7 @@ public:
*/
template <DeviceType Device>
class DepthwiseConvGradInputFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......@@ -220,7 +220,7 @@ public:
*/
template <DeviceType Device>
class DepthwiseConvGradFilterFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......
......@@ -44,7 +44,7 @@ namespace paddle {
*/
template <DeviceType Device, class T>
class DepthwiseConvFunctor {
public:
public:
void operator()(const T* inputData,
const T* filterData,
int batchSize,
......@@ -89,7 +89,7 @@ public:
*/
template <DeviceType Device, class T>
class DepthwiseConvGradInputFunctor {
public:
public:
void operator()(const T* outputGrad,
const T* filterData,
int batchSize,
......@@ -135,7 +135,7 @@ public:
*/
template <DeviceType Device, class T>
class DepthwiseConvGradFilterFunctor {
public:
public:
void operator()(const T* outputGrad,
const T* inputData,
int batchSize,
......
......@@ -199,7 +199,7 @@ __global__ void ConvolutionDepthwiseFilterBackward(const int num_i,
template <class T>
class DepthwiseConvFunctor<DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(const T* inputData,
const T* filterData,
int batchSize,
......@@ -249,7 +249,7 @@ public:
template <class T>
class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(const T* outputGrad,
const T* filterData,
int batchSize,
......@@ -300,7 +300,7 @@ public:
template <class T>
class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(const T* outputGrad,
const T* inputData,
int batchSize,
......
......@@ -46,7 +46,7 @@ int GetCpuCount() { return 1; }
#endif
class EigenDeviceWarpper {
public: // NOLINT
public: // NOLINT
#if EIGEN_USE_THREADS
static Eigen::ThreadPoolDevice* device() {
const int num_cpus = GetCpuCount();
......
......@@ -29,7 +29,7 @@ namespace paddle {
* The argument type of Function::init.
*/
class FuncConfig {
public:
public:
template <typename T>
T get(const std::string& key, Error* err = nullptr) const {
try {
......@@ -59,7 +59,7 @@ public:
return *this;
}
protected:
protected:
mutable std::unordered_map<std::string, any> valueMap_;
};
......@@ -77,7 +77,7 @@ protected:
* in the BufferArgs life time.
*/
class BufferArgs {
public:
public:
BufferArgs() {}
~BufferArgs() {
......@@ -137,7 +137,7 @@ public:
void addArg(SparseMatrixArg& arg) { args_.push_back(&arg); }
private:
private:
std::vector<BufferArg*> args_;
// The BufferArg object is constructed and freed by BufferArgs.
std::vector<BufferArg*> _args_;
......@@ -163,7 +163,7 @@ private:
* If Function has more than one output, each output can have different modes.
*/
class FunctionBase {
public:
public:
virtual ~FunctionBase() {}
virtual void init(const FuncConfig& config) {}
......@@ -192,7 +192,7 @@ public:
static ClassRegistrar<FunctionBase> funcRegistrar_;
protected:
protected:
// numInputs_ and numOutputs_ represents the maximum
// input and output supported by Function.
// Some functions are optimized for input and output,
......
......@@ -39,7 +39,7 @@ struct Allocator<DEVICE_TYPE_GPU> {
// Copy argument1 to argument2
template <DeviceType DType1, DeviceType DType2>
class CopyArgument {
public:
public:
void operator()(const BufferArg& arg1, BufferArg& arg2) {
CHECK_EQ(arg1.valueType(), arg2.valueType());
CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements());
......@@ -95,7 +95,7 @@ public:
*/
template <DeviceType DType1, DeviceType DType2>
class Compare2Function {
public:
public:
typedef typename test::Allocator<DType1>::type Allocator1;
typedef typename test::Allocator<DType2>::type Allocator2;
typedef typename Tensor<real, DType1>::Vector Vector1;
......@@ -305,7 +305,7 @@ public:
std::shared_ptr<FunctionBase> getFunction2() const { return function2_; }
protected:
protected:
// only init cpu argument, gpu argument copy from cpu argument.
void initArg(BufferArg& arg) {
Vector1 vector(arg.shape().getElements(), (real*)arg.data());
......@@ -381,7 +381,7 @@ protected:
}
}
protected:
protected:
std::shared_ptr<FunctionBase> function1_;
std::shared_ptr<FunctionBase> function2_;
std::vector<std::shared_ptr<Allocator1>> func1Memory_;
......@@ -400,7 +400,7 @@ protected:
class CpuGpuFuncCompare
: public Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> {
public:
public:
CpuGpuFuncCompare(const std::string& name, const FuncConfig& config)
: Compare2Function(name + "-CPU", name + "-GPU", config) {}
......
......@@ -24,7 +24,7 @@ namespace paddle {
*/
template <DeviceType Device>
class GemmConvFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......@@ -136,7 +136,7 @@ public:
*/
template <DeviceType Device>
class GemmConvMobileFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......@@ -297,7 +297,7 @@ public:
*/
template <DeviceType Device>
class GemmConvGradInputFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......@@ -404,7 +404,7 @@ public:
*/
template <DeviceType Device>
class GemmConvGradFilterFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......
......@@ -70,7 +70,7 @@ enum ColFormat { kCFO = 0, kOCF = 1 };
*/
template <ColFormat Format, DeviceType Device, class T>
class Im2ColFunctor {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......@@ -85,7 +85,7 @@ public:
template <ColFormat Format, DeviceType Device, class T>
class Col2ImFunctor {
public:
public:
void operator()(T* imData,
const TensorShape& imShape,
const T* colData,
......@@ -100,7 +100,7 @@ public:
template <class T>
class Im2ColMobileFunctor {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......
......@@ -23,7 +23,7 @@ namespace paddle {
*/
template <class T>
class Im2ColFunctor<kCFO, DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......@@ -75,7 +75,7 @@ public:
*/
template <class T>
class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(T* imData,
const TensorShape& imShape,
const T* colData,
......@@ -130,7 +130,7 @@ template class Col2ImFunctor<kCFO, DEVICE_TYPE_CPU, double>;
*/
template <class T>
class Im2ColFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......@@ -188,7 +188,7 @@ public:
*/
template <class T>
class Col2ImFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public:
public:
void operator()(T* imData,
const TensorShape& imShape,
const T* colData,
......
......@@ -71,7 +71,7 @@ __global__ void im2col(const T* data_im,
*/
template <class T>
class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......@@ -184,7 +184,7 @@ __global__ void col2im(size_t n,
*/
template <class T>
class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(T* imData,
const TensorShape& imShape,
const T* colData,
......@@ -292,7 +292,7 @@ __global__ void im2colOCF(const T* imData,
*/
template <class T>
class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(const T* imData,
const TensorShape& imShape,
T* colData,
......@@ -399,7 +399,7 @@ __global__ void col2imOCF(T* imData,
*/
template <class T>
class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, T> {
public:
public:
void operator()(T* imData,
const TensorShape& imShape,
const T* colData,
......
......@@ -240,7 +240,7 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out,
*/
template <DeviceType Device>
class MulFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {
aTrans_ = config.get<bool>("aTrans");
bTrans_ = config.get<bool>("bTrans");
......@@ -335,7 +335,7 @@ public:
}
}
private:
private:
bool aTrans_;
bool bTrans_;
};
......
......@@ -24,7 +24,7 @@ namespace paddle {
*/
template <class T>
class NaiveConvFunctor {
public:
public:
void operator()(const T* inputData,
size_t batchSize,
size_t inputChannels,
......@@ -85,7 +85,7 @@ public:
template <DeviceType Device>
class NaiveConvFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......
......@@ -132,7 +132,7 @@ static inline PadConf castToPadConf(const FuncConfig& conf) {
template <DeviceType Device>
class PadFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -157,7 +157,7 @@ public:
pad_);
}
private:
private:
PadConf pad_;
};
......@@ -173,7 +173,7 @@ private:
template <DeviceType Device>
class PadGradFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -201,7 +201,7 @@ public:
pad_);
}
private:
private:
PadConf pad_;
};
......
......@@ -129,7 +129,7 @@ void RowConvGrad<DEVICE_TYPE_CPU>(const CpuMatrix& outG,
template <DeviceType Device>
class RowConvFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -176,7 +176,7 @@ public:
template <DeviceType Device>
class RowConvGradFunc : public FunctionBase {
// TODO(qingqing): split into RowConvDataFunc and RowConvWeightFunc
public:
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......
......@@ -92,7 +92,7 @@ void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad,
*/
template <DeviceType Device>
class ScaleSubRegionFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -109,7 +109,7 @@ public:
conf_);
}
private:
private:
FuncConfig conf_;
};
......@@ -124,7 +124,7 @@ private:
template <DeviceType Device>
class ScaleSubRegionGradFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -141,7 +141,7 @@ public:
conf_);
}
private:
private:
FuncConfig conf_;
};
......
......@@ -75,7 +75,7 @@ void NHWC2NCHW<DEVICE_TYPE_CPU>(real* outputs,
*/
template <DeviceType Device>
class NCHW2NHWCFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......@@ -108,7 +108,7 @@ public:
*/
template <DeviceType Device>
class NHWC2NCHWFunc : public FunctionBase {
public:
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
......
......@@ -22,7 +22,7 @@ namespace paddle {
* TensorShape used to represent shape of normal tensor.
*/
class TensorShape {
public:
public:
TensorShape() : ndims_(0), nelements_(0) { initDims(0); }
TensorShape(size_t ndims) : ndims_(ndims), nelements_(1) { initDims(ndims); };
......@@ -80,7 +80,7 @@ public:
bool operator!=(const TensorShape& t) const { return !(*this == t); }
private:
private:
// compute number of elements
void numElements() {
nelements_ = 1;
......
......@@ -21,7 +21,7 @@ namespace paddle {
template <DeviceType Device>
class NeonDepthwiseConvFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......
......@@ -21,7 +21,7 @@ namespace paddle {
template <DeviceType Device>
class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
......
......@@ -46,7 +46,7 @@ nnp_convolution_algorithm get_nnp_convolution_algorithm(
template <DeviceType Device>
class NNPACKConvFunction : public ConvFunctionBase {
public:
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
algorithm_ = get_nnp_convolution_algorithm(config.get<std::string>("algo"));
......@@ -231,7 +231,7 @@ public:
}
}
private:
private:
nnp_convolution_algorithm algorithm_;
nnp_convolution_transform_strategy transform_strategy_;
void* workspaceBuffer_;
......
......@@ -44,10 +44,10 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar;
*/
#define BEGIN_DEFINE_ACTIVATION(ACTIVATION_NAME) \
class ACTIVATION_CLASS_NAME(ACTIVATION_NAME) : public ActivationFunction { \
private: \
private: \
static const std::string name; \
\
public: \
public: \
const std::string& getName() const { return name; }
/**
* @def END_DEFINE_ACTIVATION
......@@ -70,7 +70,7 @@ static ClassRegistrar<ActivationFunction> gActivationRegistrar;
* Do nothing when forward/backward.
*/
class IdentityActivation : public ActivationFunction {
public:
public:
static const std::string name;
Error __must_check forward(Argument& act) {
(void)act;
......
......@@ -31,7 +31,7 @@ struct Argument;
*
*/
class ActivationFunction {
public:
public:
static ActivationFunction* create(const std::string& type);
static std::vector<std::string> getAllRegisteredTypes();
......
......@@ -35,10 +35,10 @@ static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar;
* @def END_MKLDNN_ACTIVATION
*/
#define END_MKLDNN_ACTIVATION(ACT_TYPE) \
private: \
private: \
static const std::string name; \
\
public: \
public: \
const std::string& getName() const { return name; } \
} \
; \
......@@ -63,11 +63,11 @@ public: \
#define DEFINE_MKLDNN_ELTWISE_ACTIVATION( \
ACT_TYPE, BASE_CLASS, ALPHA, BWD_ALPHA) \
BEGIN_MKLDNN_ACTIVATION(ACT_TYPE, BASE_CLASS) \
private: \
private: \
static const float alpha; \
static const float bwdAlpha; \
\
public: \
public: \
float getAlpha() const { return alpha; } \
float getBwdAlpha() const { return bwdAlpha; } \
END_MKLDNN_ACTIVATION(ACT_TYPE) \
......
......@@ -27,7 +27,7 @@ namespace paddle {
* including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax
*/
class MKLDNNActivation : public ActivationFunction {
protected:
protected:
// input value element count
size_t cnt_;
// should not merge the resetBwd into resetFwd,
......@@ -43,7 +43,7 @@ protected:
std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_;
public:
public:
MKLDNNActivation() : cnt_(0), needResetBwd_(true) {}
~MKLDNNActivation() {}
static ActivationFunction* create(const std::string& type);
......@@ -72,7 +72,7 @@ class MKLDNNEltwiseActivation : public MKLDNNActivation {
typedef mkldnn::eltwise_backward eltwise_bwd;
typedef mkldnn::algorithm algorithm;
protected:
protected:
// save the forward primitive desc, which can be used backward
std::shared_ptr<eltwise_fwd::primitive_desc> fwdPD_;
// eltwise_bwd need src input value
......@@ -80,7 +80,7 @@ protected:
// use for copy data
std::shared_ptr<mkldnn::reorder> copyInVal_;
public:
public:
MKLDNNEltwiseActivation() {}
~MKLDNNEltwiseActivation() {}
virtual const std::string& getName() const = 0;
......@@ -102,12 +102,12 @@ public:
class MKLDNNSoftmaxActivation : public MKLDNNActivation {
typedef mkldnn::softmax_forward softmax_fwd;
private:
private:
// for backward
MatrixPtr sftMaxSum_;
MatrixPtr sftMaxDot_;
public:
public:
MKLDNNSoftmaxActivation() {}
~MKLDNNSoftmaxActivation() {}
virtual const std::string& getName() const = 0;
......
......@@ -71,7 +71,7 @@ typedef std::shared_ptr<BufferBatch> BufferBatchPtr;
* @brief Data for batch training a neural network
*/
class DataBatch {
public:
public:
DataBatch() : size_(0) { data_.clear(); }
/**
* @brief Get batch size
......@@ -181,7 +181,7 @@ public:
}
}
protected:
protected:
/**
* @brief batch size
*/
......@@ -194,7 +194,7 @@ protected:
};
class BufferBatch {
public:
public:
BufferBatch() {
hlStream_ = HPPL_STREAM_DEFAULT;
hlEvent_ = NULL;
......@@ -235,7 +235,7 @@ public:
void swap(BufferBatch* bufBatch);
void clone(DataBatch* srcBatch, bool useGpu);
protected:
protected:
DataBatch* batchData_;
hl_stream_t hlStream_;
hl_event_t hlEvent_;
......@@ -247,7 +247,7 @@ typedef std::shared_ptr<DataProvider> DataProviderPtr;
typedef Queue<BufferBatch*> BufferBatchQueue;
class DoubleBuffer {
public:
public:
DoubleBuffer(DataProvider* dataPool, bool useGpu, int64_t batchSize = 0);
virtual ~DoubleBuffer();
void removeOneBatch(DataBatch* dataBatch);
......@@ -267,7 +267,7 @@ public:
void setPending(bool pending) { pending_ = pending; }
protected:
protected:
virtual void asyncLoadBatch();
void insertOneBatch(DataBatch* batch);
......@@ -290,7 +290,7 @@ protected:
* one is for input, one is for label.
*/
class DataProvider {
public:
public:
static ClassRegistrar<DataProvider, DataConfig, ModelConfig, bool> registrar_;
static DataProvider* create(const DataConfig& config,
const ModelConfig& modelConfig,
......@@ -359,7 +359,7 @@ public:
*/
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch) = 0;
protected:
protected:
DataConfig config_;
bool skipShuffle_;
float usageRatio_;
......@@ -382,7 +382,7 @@ protected:
* necessary configurations such as stream_names
*/
class DummyDataProvider : public DataProvider {
public:
public:
DummyDataProvider(const DataConfig& config, bool useGpu)
: DataProvider(config, useGpu) {}
virtual void shuffle() {}
......@@ -399,7 +399,7 @@ public:
* Data provider for one input and one integer label.
*/
class SimpleDataProviderBase : public DataProvider {
protected:
protected:
/// sample feature dimension
int64_t sampleDim_;
/// the number of samples
......@@ -425,7 +425,7 @@ protected:
RWLock lock_;
public:
public:
SimpleDataProviderBase(const DataConfig& config, bool useGpu, bool withInfo);
~SimpleDataProviderBase() {}
......@@ -440,7 +440,7 @@ public:
/// return the number of samples in the buffer
int64_t fillBuffer();
protected:
protected:
/**
* @brief Fill at most size samples into data and label.
*
......@@ -458,12 +458,12 @@ protected:
};
class SimpleDataProvider : public SimpleDataProviderBase {
public:
public:
SimpleDataProvider(const DataConfig& config, bool useGpu);
~SimpleDataProvider();
virtual void reset();
protected:
protected:
void loadData(const std::string& fileName);
void loadDataFile(const std::string& fileName);
virtual int64_t fillBufferImp(real* data,
......@@ -471,7 +471,7 @@ protected:
int* info,
int64_t size);
protected:
protected:
size_t currentSampleIndex_;
std::vector<int> labels_;
std::vector<real> data_;
......
......@@ -20,7 +20,7 @@ namespace paddle {
template <class T>
class DataProviderGroup : public DataProvider {
protected:
protected:
typedef T ProviderType;
typedef std::shared_ptr<ProviderType> ProviderPtrType;
ProviderPtrType provider_;
......@@ -29,7 +29,7 @@ protected:
std::mutex lock_;
std::unique_ptr<MultiThreadWorker<ProviderType>> loader_;
public:
public:
DataProviderGroup(const DataConfig& config, bool useGpu);
~DataProviderGroup() {}
......@@ -38,7 +38,7 @@ public:
virtual int64_t getSize() { return -1; }
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
private:
private:
void startLoader();
void stopLoader();
void forceStopLoader();
......
......@@ -19,10 +19,10 @@ limitations under the License. */
namespace paddle {
class MultiDataProvider : public DataProvider {
protected:
protected:
std::vector<std::unique_ptr<DataProvider>> subDataProviders_;
public:
public:
MultiDataProvider(const DataConfig& config,
const ModelConfig& modelConfig,
bool useGpu);
......@@ -33,7 +33,7 @@ public:
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
bool isTestMode() const { return isTestMode_; }
private:
private:
int totalDataRatio_;
bool isTestMode_;
};
......
......@@ -28,7 +28,7 @@ namespace paddle {
* messages from/to i/ostream.
*/
class ProtoReader {
public:
public:
explicit ProtoReader(std::istream* s, bool dataCompression = false) {
CHECK(s) << "istream pointer is nullptr";
istreamInput_.reset(new google::protobuf::io::IstreamInputStream(s));
......@@ -109,7 +109,7 @@ public:
return true;
}
protected:
protected:
std::unique_ptr<google::protobuf::io::ZeroCopyInputStream> istreamInput_;
std::unique_ptr<google::protobuf::io::GzipInputStream> gzipInput_;
std::unique_ptr<google::protobuf::io::CodedInputStream> codedInput_;
......@@ -144,7 +144,7 @@ protected:
};
class ProtoWriter {
public:
public:
explicit ProtoWriter(std::ostream* s, bool dataCompression = false) {
CHECK(s) << "ostream pointer is nullptr";
ostreamOutput_.reset(new google::protobuf::io::OstreamOutputStream(s));
......@@ -168,7 +168,7 @@ public:
return ret;
}
protected:
protected:
std::unique_ptr<google::protobuf::io::ZeroCopyOutputStream> ostreamOutput_;
std::unique_ptr<google::protobuf::io::GzipOutputStream> gzipOutput_;
std::unique_ptr<google::protobuf::io::CodedOutputStream> codedOutput_;
......
......@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle {
class PyDataProvider : public DataProvider {
public:
public:
PyDataProvider(const DataConfig& config,
bool useGpu,
bool loadDataAll = true);
......@@ -40,7 +40,7 @@ public:
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
protected:
protected:
struct ProtoSlot;
// return false if each each sample is one sequence, i.e., independent
// of other samples.
......@@ -73,7 +73,7 @@ protected:
void resetSlots();
void loadData(const std::vector<std::string>& fileList);
protected:
protected:
struct ProtoSlot {
SlotDef::SlotType type;
int dim;
......
......@@ -93,7 +93,7 @@ inline std::ostream& operator<<(std::ostream& os, const SlotHeader& header) {
* prepare step, fill data into argument during fill step.
*/
class IFieldScanner {
public:
public:
DISABLE_COPY(IFieldScanner);
/**
* Ctor.
......@@ -146,7 +146,7 @@ public:
*/
static IFieldScanner* create(SlotHeader* header);
protected:
protected:
SlotHeader* headerPtr_;
};
......@@ -154,7 +154,7 @@ protected:
* Py Data Provider Cache Interface.
*/
class IPyDataProviderCache {
public:
public:
virtual ~IPyDataProviderCache() {}
/**
......@@ -193,7 +193,7 @@ public:
* data. And it support cache strategies.
*/
class PyDataProvider2 : public DataProvider {
public:
public:
/**
* Ctor
*/
......@@ -234,7 +234,7 @@ public:
*/
virtual ~PyDataProvider2() { resetImpl(false); }
private:
private:
void createPyDataObj(const std::string& model,
const std::string& className,
const std::string& fileListName,
......@@ -435,7 +435,7 @@ private:
exit_ = false;
}
private:
private:
std::unique_ptr<std::thread> loadThread_;
std::atomic<bool> exit_;
std::deque<PyObjectPtr> callingContexts_;
......@@ -461,7 +461,7 @@ private:
static PyObjectPtr zeroTuple_;
class PositionRandom {
public:
public:
inline explicit PositionRandom(bool skipRand)
: eng_(ThreadLocalRandomEngine::get()), skipRand_(skipRand) {}
......@@ -476,14 +476,14 @@ private:
}
}
private:
private:
std::default_random_engine& eng_;
std::unique_ptr<std::uniform_int_distribution<size_t>> dist_;
bool skipRand_;
};
// DataProvider interface
public:
public:
/**
* Resetting the PyDataProvider. May start reading thread here.
*/
......@@ -666,7 +666,7 @@ REGISTER_DATA_PROVIDER_EX(py2, PyDataProvider2);
* Scanner for dense slot.
*/
class DenseScanner : public IFieldScanner {
public:
public:
explicit DenseScanner(SlotHeader* ptr) : IFieldScanner(ptr), height_(0) {}
/**
......@@ -708,7 +708,7 @@ public:
++height_;
}
private:
private:
size_t height_;
};
......@@ -716,7 +716,7 @@ private:
* Scanner for index slot
*/
class IndexScanner : public IFieldScanner {
public:
public:
explicit IndexScanner(SlotHeader* ptr) : IFieldScanner(ptr), cnt_(0) {}
/**
......@@ -740,12 +740,12 @@ public:
CHECK(ok) << "Cannot cast int " << py::repr(obj);
}
private:
private:
size_t cnt_;
};
class SparseNonValueScanner : public IFieldScanner {
public:
public:
explicit SparseNonValueScanner(SlotHeader* ptr)
: IFieldScanner(ptr), nnz_(0), height_(0) {}
......@@ -790,7 +790,7 @@ public:
++height_;
}
protected:
protected:
/**
* Set a single sparse index and value.
* @param [out] col sparse index
......@@ -809,7 +809,7 @@ protected:
};
class SparseValueScanner : public SparseNonValueScanner {
public:
public:
explicit SparseValueScanner(SlotHeader* ptr) : SparseNonValueScanner(ptr) {}
virtual void finishPrepare(Argument& argument) {
......@@ -817,7 +817,7 @@ public:
argument.value, height_, headerPtr_->dim, nnz_, FLOAT_VALUE);
}
protected:
protected:
virtual void setData(int* col, real* dat, PyObject* obj) {
py::SequenceHelper s(obj);
SparseNonValueScanner::setData(col, dat, s[0]);
......@@ -829,7 +829,7 @@ protected:
* Sequence Scanner. Scanner for sequence or sub-sequence.
*/
class SequenceScanner : public IFieldScanner {
public:
public:
/**
* Ctor
* @param innerScanner inner scanner for each timestep or sub-sequence.
......@@ -902,7 +902,7 @@ public:
*/
virtual void finishFill(Argument& argument) { inner_->finishFill(argument); }
protected:
protected:
size_t getSize(PyObject* obj) {
py::SequenceHelper s(obj);
auto sc = dynamic_cast<SequenceScanner*>(inner_.get());
......@@ -917,7 +917,7 @@ protected:
}
}
private:
private:
std::unique_ptr<IFieldScanner> inner_;
size_t cnt_;
std::function<ICpuGpuVectorPtr&(Argument&)> getSeqStartPos_;
......@@ -969,7 +969,7 @@ IFieldScanner* IFieldScanner::create(SlotHeader* header) {
* python every pass.
*/
class NoCacheStrategy : public IPyDataProviderCache {
public:
public:
virtual bool reset() { return true; }
virtual void drop(std::deque<PyObjectPtr>* data) { data->clear(); }
......@@ -984,7 +984,7 @@ public:
* The rest passes, will load data from memory.
*/
class CacheOnePassInMemory : public IPyDataProviderCache {
public:
public:
CacheOnePassInMemory()
: objPool_(new std::deque<PyObjectPtr>()),
droppedPool_(new std::deque<PyObjectPtr>()) {}
......@@ -1011,7 +1011,7 @@ public:
virtual std::deque<PyObjectPtr>* load() { return objPool_.get(); }
private:
private:
std::unique_ptr<std::deque<PyObjectPtr>> objPool_;
std::unique_ptr<std::deque<PyObjectPtr>> droppedPool_;
};
......
......@@ -22,7 +22,7 @@ namespace paddle {
* calculate sequence-to-sequence edit distance
*/
class CTCErrorEvaluator : public Evaluator {
private:
private:
MatrixPtr outActivations_;
int numTimes_, numClasses_, numSequences_, blank_;
real deletions_, insertions_, substitutions_;
......@@ -197,7 +197,7 @@ private:
(real)seqClassficationError_ / numSequences_;
}
public:
public:
CTCErrorEvaluator()
: numTimes_(0),
numClasses_(0),
......
......@@ -77,7 +77,7 @@ class ChunkEvaluator : public Evaluator {
std::set<int> excludedChunkTypes_;
mutable std::unordered_map<std::string, real> values_;
public:
public:
virtual void init(const EvaluatorConfig& config) {
Evaluator::init(config);
if (config.chunk_scheme() == "IOB") {
......@@ -276,7 +276,7 @@ public:
return "chunk";
}
private:
private:
void storeLocalValues() const {
CHECK_GE(numOutputSegments_, 0);
CHECK_GE(numLabelSegments_, 0);
......
......@@ -28,7 +28,7 @@ namespace paddle {
* The config file api is detection_map_evaluator.
*/
class DetectionMAPEvaluator : public Evaluator {
public:
public:
DetectionMAPEvaluator()
: evaluateDifficult_(false), cpuOutput_(nullptr), cpuLabel_(nullptr) {}
......@@ -132,7 +132,7 @@ public:
LOG(FATAL) << "Distribute detection evaluation not implemented.";
}
protected:
protected:
void calcTFPos(const size_t batchSize,
const vector<map<size_t, vector<NormalizedBBox>>>& allGTBBoxes,
const vector<map<size_t, vector<pair<real, NormalizedBBox>>>>&
......@@ -287,7 +287,7 @@ protected:
real getValueImpl() const { return calcMAP(); }
private:
private:
real overlapThreshold_; // overlap threshold when determining whether matched
bool evaluateDifficult_; // whether evaluate difficult ground truth
size_t backgroundId_; // class index of background
......
......@@ -38,7 +38,7 @@ void Evaluator::eval(const NeuralNetwork& nn) {
* The config file api is classification_error_evaluator.
*/
class ClassificationErrorEvaluator : public Evaluator {
public:
public:
/*
ClassificationErrorEvaluator() : totalScore2_(0) {}
......@@ -124,7 +124,7 @@ public:
}
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const { return "classification_error"; }
};
......@@ -135,7 +135,7 @@ protected:
*/
class SequenceClassificationErrorEvaluator
: public ClassificationErrorEvaluator {
public:
public:
virtual void updateSamplesNum(const std::vector<Argument>& arguments) {
numSamples_ += arguments[0].getNumSequences();
}
......@@ -166,7 +166,7 @@ public:
}
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const { return "seq_classification_error"; }
};
REGISTER_EVALUATOR(seq_classification_error,
......@@ -178,7 +178,7 @@ REGISTER_EVALUATOR(seq_classification_error,
* The config file api is sum_evaluator.
*/
class SumEvaluator : public Evaluator {
public:
public:
SumEvaluator() : cpuLabel_(nullptr), cpuWeight_(nullptr) {}
virtual void updateSamplesNum(const std::vector<Argument>& arguments) {
......@@ -255,12 +255,12 @@ public:
mergeResultsOfAllClients(client);
}
private:
private:
IVectorPtr cpuLabel_;
MatrixPtr cpuWeight_;
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const { return "sum"; }
};
/**
......@@ -274,7 +274,7 @@ protected:
*
*/
class ColumnSumEvaluator : public Evaluator {
public:
public:
explicit ColumnSumEvaluator(int32_t colIdx)
: colIdx_(colIdx), colNum_(0), sum_(nullptr) {}
......@@ -368,13 +368,13 @@ public:
client->reduce(&numSamples_, &numSamples_, 1, FLAGS_trainer_id, 0);
}
private:
private:
int32_t colIdx_;
size_t colNum_;
MatrixPtr sum_; /* cpu matrix */
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const {
if (colIdx_ == -1)
return "last-column-sum";
......@@ -1018,7 +1018,7 @@ static InitFunction __reg_type_auc_sum__([]() {
* The config file api is value_printer_evaluator.
*/
class ValuePrinter : public NotGetableEvaluator {
public:
public:
virtual void eval(const NeuralNetwork& nn) {
for (const std::string& name : config_.input_layers()) {
nn.getLayer(name)->getOutput().printValueString(LOG(INFO),
......@@ -1038,7 +1038,7 @@ REGISTER_EVALUATOR(value_printer, ValuePrinter);
* The config file api is gradient_printer_evaluator.
*/
class GradientPrinter : public NotGetableEvaluator {
public:
public:
virtual void eval(const NeuralNetwork& nn) {
for (const std::string& name : config_.input_layers()) {
const Argument& argu = nn.getLayer(name)->getOutput();
......@@ -1061,11 +1061,11 @@ REGISTER_EVALUATOR(gradient_printer, GradientPrinter);
* The config file api is maxid_printer_evaluator.
*/
class MaxIdPrinter : public NotGetableEvaluator {
private:
private:
IVectorPtr maxIds_;
MatrixPtr maxValues_;
public:
public:
MaxIdPrinter() {}
virtual void eval(const NeuralNetwork& nn) {
......@@ -1103,12 +1103,12 @@ REGISTER_EVALUATOR(max_id_printer, MaxIdPrinter);
* The config file api is maxframe_printer_evaluator.
*/
class MaxFramePrinter : public NotGetableEvaluator {
private:
private:
IVectorPtr maxIds_;
MatrixPtr maxValues_;
MatrixPtr value_;
public:
public:
MaxFramePrinter() {
value_ =
Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, false);
......@@ -1190,7 +1190,7 @@ REGISTER_EVALUATOR(max_frame_printer, MaxFramePrinter);
*
*/
class SequenceTextPrinter : public NotGetableEvaluator {
private:
private:
/// dict_file, which contains a list of tokens
std::vector<std::string> dict_;
/// result_file, which is the output file
......@@ -1203,7 +1203,7 @@ private:
/// store the probability associated with each sequence
std::vector<MatrixPtr> cpuIn_;
public:
public:
SequenceTextPrinter() {}
virtual void init(const EvaluatorConfig& config) {
......@@ -1334,7 +1334,7 @@ REGISTER_EVALUATOR(seq_text_printer, SequenceTextPrinter);
* The config file api is classification_error_printer_evaluator.
*/
class ClassificationErrorPrinter : public ClassificationErrorEvaluator {
public:
public:
virtual void updateSamplesNum(const std::vector<Argument>& arguments) {}
virtual real evalImp(std::vector<Argument>& arguments) {
......
......@@ -40,7 +40,7 @@ class NeuralNetwork;
* has been by a trained model.
*/
class Evaluator {
public:
public:
static Evaluator* create(const EvaluatorConfig& config);
Evaluator() : numSamples_(0), totalScore_(0) {}
......@@ -172,7 +172,7 @@ public:
return this->getTypeImpl();
}
protected:
protected:
/**
* @brief getValueImpl The simplest way to define getValue result. If this
 * evaluator doesn't contain multiple fields and does not throw any error, just
......@@ -191,7 +191,7 @@ protected:
*/
virtual std::string getTypeImpl() const { return "base"; }
protected:
protected:
EvaluatorConfig config_;
double numSamples_;
double totalScore_;
......@@ -204,7 +204,7 @@ protected:
*/
class NotGetableEvaluator : public Evaluator {
// Evaluator interface
public:
public:
void getNames(std::vector<std::string>* names) {}
real getValue(const std::string& name, Error* err) const {
......@@ -219,7 +219,7 @@ public:
};
class DummyEvaluator : public Evaluator {
public:
public:
DummyEvaluator() {}
virtual void init(const EvaluatorConfig&) {}
virtual void start() {}
......@@ -232,7 +232,7 @@ public:
virtual void printStats(std::ostream&) const {}
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const;
};
/**
......@@ -251,7 +251,7 @@ protected:
*
*/
class AucEvaluator : public Evaluator {
public:
public:
AucEvaluator(int32_t colIdx)
: colIdx_(colIdx),
realColumnIdx_(0),
......@@ -269,7 +269,7 @@ public:
virtual void distributeEval(ParameterClient2* client);
private:
private:
static const uint32_t kBinNum_ = (1 << 24) - 1;
static const int kNegativeLabel_ = 0;
double statPos_[kBinNum_ + 1];
......@@ -292,7 +292,7 @@ private:
double calcAuc() const;
// Evaluator interface
protected:
protected:
real getValueImpl() const;
std::string getTypeImpl() const;
};
......@@ -305,7 +305,7 @@ protected:
* dense value.
*/
class RankAucEvaluator : public Evaluator {
public:
public:
// evaluate ranking AUC
virtual void start();
......@@ -317,7 +317,7 @@ public:
mergeResultsOfAllClients(client);
}
private:
private:
MatrixPtr output_;
MatrixPtr click_;
MatrixPtr pv_;
......@@ -329,7 +329,7 @@ private:
size_t size);
// Evaluator interface
protected:
protected:
std::string getTypeImpl() const;
};
......@@ -344,7 +344,7 @@ protected:
* The config file api is precision_recall_evaluator.
*/
class PrecisionRecallEvaluator : public Evaluator {
public:
public:
// Evaluate precision, recall and F1 score
PrecisionRecallEvaluator()
: isMultiBinaryLabel_(false),
......@@ -379,7 +379,7 @@ public:
StatsInfo() : TP(0.0), TN(0.0), FP(0.0), FN(0.0) {}
};
private:
private:
bool isMultiBinaryLabel_;
std::vector<StatsInfo> statsInfo_;
......@@ -444,7 +444,7 @@ private:
* The config file api is pnpair_evaluator.
*/
class PnpairEvaluator : public Evaluator {
public:
public:
PnpairEvaluator()
: cpuOutput_(nullptr),
cpuLabel_(nullptr),
......@@ -491,7 +491,7 @@ public:
<< " calc total neg pair: " << pairArray_[1];
}
private:
private:
static const uint32_t kPairArrayNum_ = 2;
double pairArray_[kPairArrayNum_];
MatrixPtr cpuOutput_;
......@@ -500,7 +500,7 @@ private:
MatrixPtr cpuWeight_;
// Evaluator interface
protected:
protected:
real getValueImpl() const {
return pairArray_[0] / ((pairArray_[1] <= 0) ? 1.0 : pairArray_[1]);
}
......
......@@ -73,7 +73,7 @@ class GradientMachine;
typedef std::shared_ptr<GradientMachine> GradientMachinePtr;
class GradientMachine {
public:
public:
enum CreateMode {
kNormal = 0,
kSgdSparseCpuTraining = 3,
......@@ -240,7 +240,7 @@ public:
*/
virtual void releaseOutput() {}
protected:
protected:
virtual void onLoadParameter() {}
std::vector<ParameterPtr> parameters_;
......
......@@ -19,14 +19,14 @@ limitations under the License. */
namespace paddle {
class IGradientMachineMode {
public:
public:
virtual ~IGradientMachineMode() {}
public: // interfaces
/**
* @brief create current mode's gradient machine by model config.
* @param config model config
*/
public: // interfaces
/**
* @brief create current mode's gradient machine by model config.
* @param config model config
*/
virtual GradientMachine* create(const ModelConfig& config) = 0;
/**
......@@ -55,14 +55,14 @@ public: // interfaces
*/
virtual bool needTrainWholeDataInOneBatch() const = 0;
public: // static methods.
/**
* @brief register a custom gradient machine mode.
* @note For user to register a custom gradient machine mode, id should >=
* kCustom.
* @param mode mode id.
* @param ptr mode description object.
*/
public: // static methods.
/**
* @brief register a custom gradient machine mode.
* @note For user to register a custom gradient machine mode, id should >=
* kCustom.
* @param mode mode id.
* @param ptr mode description object.
*/
static void regGradientMachineMode(
int32_t mode, std::unique_ptr<IGradientMachineMode>&& ptr) {
modes_.insert(std::make_pair(mode, std::move(ptr)));
......@@ -141,7 +141,7 @@ public: // static methods.
}
}
private:
private:
static std::unordered_map<int32_t, std::unique_ptr<IGradientMachineMode>>
modes_;
};
......
......@@ -166,7 +166,7 @@ struct GradBuffer {
* the merged gradient to parameter server.
*/
class MultiGradientMachine : public GradientMachine {
public:
public:
enum TaskType {
TASK_FORWARD_BACKWARD = 0,
TASK_FORWARD = 1,
......@@ -213,7 +213,7 @@ public:
 /// The gradients will be copied to each thread in the computing threads.
virtual void setOutputGrad(const std::vector<Argument>& args);
protected:
protected:
friend class TrainerThread;
std::vector<TrainerThreadPtr>& getAllThreads() { return threads_; }
......@@ -281,7 +281,7 @@ protected:
int paraMainThread(int pid) const { return paraMainThread_[pid]; }
protected:
protected:
virtual void forwardImp(const std::vector<Argument>& inArgs,
std::vector<Argument>* outArgs,
PassType passType,
......@@ -298,7 +298,7 @@ protected:
void allocGradBufs();
protected:
protected:
bool useGpu_;
bool hasNonstaticCpuParamters_;
......@@ -342,7 +342,7 @@ protected:
};
class TrainerThread {
public:
public:
TrainerThread(const ModelConfig& config,
int threadId,
MultiGradientMachine* multiMachine);
......@@ -392,7 +392,7 @@ public:
/// Whether the thread has input data.
bool hasInputData() { return batchSize_ != 0; }
protected:
protected:
void mergeCpuGradients();
void mergeGradSparse(
......@@ -421,7 +421,7 @@ protected:
/// GradientMachine::backward
void doCallback(int pid);
protected:
protected:
MultiGradientMachine* multiMachine_;
ModelConfig config_;
/// whether the thread should stop
......
......@@ -122,7 +122,7 @@ void MultiNetwork::finish() {
}
class MultiCombinedEvaluator : public Evaluator {
public:
public:
MultiCombinedEvaluator() {}
void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
evaluators_.emplace_back(std::move(evaluator));
......@@ -167,7 +167,7 @@ public:
}
}
protected:
protected:
std::vector<std::unique_ptr<Evaluator>> evaluators_;
};
......
......@@ -22,7 +22,7 @@ limitations under the License. */
namespace paddle {
class MultiNetwork : public NeuralNetwork {
public:
public:
explicit MultiNetwork(std::string subModelName = "")
: NeuralNetwork(subModelName) {}
......@@ -58,7 +58,7 @@ public:
virtual void finish();
protected:
protected:
std::vector<std::unique_ptr<NeuralNetwork>> subNetworks_;
};
} // namespace paddle
......@@ -362,7 +362,7 @@ void NeuralNetwork::releaseOutput() {
#ifndef PADDLE_MOBILE_INFERENCE
class CombinedEvaluator : public Evaluator {
public:
public:
void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
evaluators_.emplace_back(std::move(evaluator));
}
......@@ -400,11 +400,11 @@ public:
}
}
protected:
protected:
std::vector<std::unique_ptr<Evaluator>> evaluators_;
// Evaluator interface
public:
public:
/**
* @brief getNames will return all inside evaluators' names.
* @param names [out]: return names.
......@@ -435,7 +435,7 @@ public:
});
}
private:
private:
template <typename T>
T getMethodHelper(const std::string& name,
Error* err,
......@@ -454,7 +454,7 @@ private:
};
class SubnetEvaluator : public CombinedEvaluator {
public:
public:
SubnetEvaluator(const std::string& layerName,
std::unique_ptr<Evaluator>&& evaluator)
: layerName_(layerName) {
......@@ -473,7 +473,7 @@ public:
<< " in submodel " << nn.getName();
}
protected:
protected:
std::string layerName_;
};
......
......@@ -56,7 +56,7 @@ void parameterInitNN(int paramId,
std::vector<ParameterPtr>* sharedParams);
class NeuralNetwork : public GradientMachine {
public:
public:
virtual void init(const ModelConfig& config,
ParamInitCallback callback = nullptr,
const std::vector<ParameterType>& parameterTypes =
......@@ -144,7 +144,7 @@ public:
*/
void releaseOutput();
protected:
protected:
/**
* The constructor of NeuralNetwork.
* The sub networks can get parameters_ and parameterMap_
......
......@@ -32,7 +32,7 @@ enum TaskType {
* multiple threads in parallel.
*/
class ParallelNeuralNetwork : public NeuralNetwork {
public:
public:
ParallelNeuralNetwork(std::string subModelName = "",
NeuralNetwork *rootNetwork = nullptr)
: NeuralNetwork(subModelName, rootNetwork) {}
......@@ -66,7 +66,7 @@ public:
// virtual void eval(Evaluator* evaluator);
protected:
protected:
bool useGpu_;
/// number of gpu devices
int numDevices_;
......@@ -74,7 +74,7 @@ protected:
};
class ParallelThread {
public:
public:
ParallelThread(int threadId, int deviceId, bool useGpu);
~ParallelThread();
void jobEnqueue(LayerPtr layer, TaskType task);
......@@ -87,10 +87,10 @@ public:
}
void setForwardPassType(PassType passType) { passType_ = passType; }
protected:
protected:
void computeThread();
public:
public:
struct Job {
LayerPtr layer_;
TaskType task_;
......@@ -98,7 +98,7 @@ public:
typedef Queue<Job> JobQueue;
JobQueue queue_;
protected:
protected:
/// from 0 to threads-1
int threadId_;
/// the GPU device Id which the computeThread_ used
......
......@@ -96,7 +96,7 @@ static InitFunction __init__diy_prob_method(
std::numeric_limits<int>::max());
class BeamSearchControlCallbacks {
public:
public:
RecurrentGradientMachine::BeamSearchCandidatesAdjustCallback
beamSearchCandidateAdjust;
RecurrentGradientMachine::NormOrDropNodeCallback normOrDropNode;
......@@ -115,7 +115,7 @@ public:
};
class BeamSearchStatisticsCallbacks {
public:
public:
RecurrentGradientMachine::EachStepCallback onEachStepStarted;
RecurrentGradientMachine::EachStepCallback onEachStepStoped;
......@@ -148,11 +148,11 @@ RecurrentGradientMachine::RecurrentGradientMachine(
 * so it should not be placed in the root network.
*/
class BootBiasLayer : public Layer {
protected:
protected:
std::unique_ptr<Weight> biases_;
IVectorPtr cpuIds_;
public:
public:
explicit BootBiasLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......
......@@ -30,7 +30,7 @@ class BeamSearchControlCallbacks;
class BeamSearchStatisticsCallbacks;
class RecurrentGradientMachine : public NeuralNetwork {
public:
public:
RecurrentGradientMachine(const std::string& subModelName,
NeuralNetwork* rootNetwork);
......@@ -290,7 +290,7 @@ public:
return this->finalPaths_;
}
protected:
protected:
std::vector<Argument::SeqInfo> commonSeqInfo_;
ICpuGpuVectorPtr sequenceStartPositions_;
void calcSequenceStartPositions();
......@@ -447,7 +447,7 @@ protected:
MatrixPtr cpuProb_;
IVectorPtr cpuEos_;
private:
private:
/*
* @return beam size in beam search
*/
......
......@@ -33,10 +33,10 @@ namespace paddle {
* The config file api is addto_layer.
*/
class AddtoLayer : public Layer {
protected:
protected:
std::unique_ptr<Weight> biases_;
public:
public:
explicit AddtoLayer(const LayerConfig& config) : Layer(config) {}
~AddtoLayer() {}
......
......@@ -26,11 +26,11 @@ namespace paddle {
* called to set one and only one real layer
*/
class AgentLayer : public Layer {
protected:
protected:
LayerPtr realLayer_;
int numSamples_;
public:
public:
explicit AgentLayer(const LayerConfig& config) : Layer(config) {}
~AgentLayer() {}
......@@ -55,14 +55,14 @@ public:
* GatherAgentLayer collect a complete sequence.
*/
class GatherAgentLayer : public Layer {
protected:
protected:
std::vector<LayerPtr> realLayers_;
std::vector<IVectorPtr> idsVec_;
 // we don't clear idsVec_ vector to avoid IVector alloc/free
IVectorPtr allIds_;
std::vector<int> idIndex_;
public:
public:
explicit GatherAgentLayer(const LayerConfig& config) : Layer(config) {}
virtual ~GatherAgentLayer() {}
......@@ -95,7 +95,7 @@ public:
* if it is, the agent will select a few ids in real layer.
*/
class ScatterAgentLayer : public Layer {
protected:
protected:
LayerPtr realLayer_;
IVectorPtr ids_;
IVectorPtr cpuIds_;
......@@ -113,7 +113,7 @@ protected:
// true for setRealLayer, false for setRealLayerAndOutput
bool selectionMode_;
public:
public:
explicit ScatterAgentLayer(const LayerConfig& config) : Layer(config) {}
virtual ~ScatterAgentLayer() {}
......
......@@ -37,7 +37,7 @@ namespace paddle {
* The config file api is pooling_layer.
*/
class AverageLayer : public SequencePoolLayer {
public:
public:
enum AverageStrategy { kAverage = 0, kSum = 1, kAverageSquareRootN = 2 };
explicit AverageLayer(const LayerConfig& config)
: SequencePoolLayer(config) {}
......@@ -48,7 +48,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
int mode_;
};
} // namespace paddle
......@@ -40,7 +40,7 @@ namespace paddle {
*/
class BatchNormBaseLayer : public Layer {
public:
public:
explicit BatchNormBaseLayer(const LayerConfig& config) : Layer(config) {}
~BatchNormBaseLayer() {}
......@@ -61,7 +61,7 @@ public:
*/
void calFeatureMapSize();
protected:
protected:
/// Batch normalization scale parameter, which is referred to as gamma in
/// in original paper.
std::unique_ptr<Weight> weight_;
......
......@@ -27,7 +27,7 @@ namespace paddle {
*/
class BatchNormalizationLayer : public BatchNormBaseLayer {
public:
public:
explicit BatchNormalizationLayer(const LayerConfig& config)
: BatchNormBaseLayer(config), firstTest_(true) {}
......@@ -38,7 +38,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
/// Load pre-calculated mean and std.
void setMeanAndStd();
......
......@@ -26,13 +26,13 @@ namespace paddle {
* @note The config file api is bilinear_interp_layer.
*/
class BilinearInterpLayer : public Layer {
protected:
protected:
size_t outImgH_, outImgW_;
size_t inImgH_, inImgW_;
real ratioH_, ratioW_;
size_t numChannels_;
public:
public:
explicit BilinearInterpLayer(const LayerConfig& config) : Layer(config) {}
virtual ~BilinearInterpLayer() {}
......
......@@ -40,7 +40,7 @@ namespace paddle {
* The config file api is block_expand_layer.
*/
class BlockExpandLayer : public Layer {
protected:
protected:
/**
* @brief Calculate outputH_ and outputW_ and return block number which
* actually is time steps.
......@@ -53,7 +53,7 @@ protected:
TensorShape inputShape_;
TensorShape outputShape_;
public:
public:
explicit BlockExpandLayer(const LayerConfig& config) : Layer(config) {}
~BlockExpandLayer() {}
......
......@@ -30,14 +30,14 @@ namespace paddle {
* See LinearChainCRF.h for the detail of the CRF formulation.
*/
class CRFDecodingLayer : public CRFLayer {
public:
public:
explicit CRFDecodingLayer(const LayerConfig& config) : CRFLayer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
protected:
protected:
std::unique_ptr<LinearChainCRF> crf_;
};
......
......@@ -27,14 +27,14 @@ namespace paddle {
* See class LinearChainCRF for the detail of the CRF formulation.
*/
class CRFLayer : public Layer {
public:
public:
explicit CRFLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
protected:
protected:
size_t numClasses_;
ParameterPtr parameter_;
std::vector<LinearChainCRF> crfs_;
......
......@@ -20,7 +20,7 @@ limitations under the License. */
namespace paddle {
class CTCLayer : public Layer {
public:
public:
explicit CTCLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
......@@ -31,7 +31,7 @@ public:
const Argument& softmaxSeqs,
const Argument& labelSeqs);
protected:
protected:
size_t numClasses_;
bool normByTimes_;
std::vector<LinearChainCTC> ctcs_;
......
......@@ -24,11 +24,11 @@ namespace paddle {
*/
class ClipLayer : public Layer {
protected:
protected:
double min_;
double max_;
public:
public:
explicit ClipLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......
......@@ -23,7 +23,7 @@ namespace paddle {
* each input as one row for the output of this layer and apply activation.
*/
class ConcatenateLayer : public Layer {
public:
public:
explicit ConcatenateLayer(const LayerConfig& config) : Layer(config) {}
~ConcatenateLayer() {}
......@@ -97,7 +97,7 @@ void ConcatenateLayer::backward(const UpdateCallback& callback) {
* processed by a Projection.
*/
class ConcatenateLayer2 : public Layer {
public:
public:
explicit ConcatenateLayer2(const LayerConfig& config) : Layer(config) {}
~ConcatenateLayer2() {}
......@@ -108,7 +108,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
std::vector<std::unique_ptr<Projection>> projections_;
std::vector<Argument> projOutput_;
std::vector<std::pair<size_t, size_t>> projCol_;
......
......@@ -42,7 +42,7 @@ namespace paddle {
* The config file api is context_projection.
*/
class ContextProjection : public Projection {
public:
public:
/**
 * Constructor. If context_start is zero and context_length is one, it will
 * set trainable_padding false. trainable_padding is an optional argument
......@@ -63,7 +63,7 @@ public:
virtual bool init();
protected:
protected:
std::unique_ptr<Weight> weight_;
/// number of extra timesteps added at the beginning
size_t beginPad_;
......
......@@ -26,7 +26,7 @@ namespace paddle {
* calculate convolution operation.
*/
class Conv3DLayer : public ConvBaseLayer {
public:
public:
explicit Conv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~Conv3DLayer() {}
......@@ -40,7 +40,7 @@ public:
void bpropWeights(int i);
size_t getSize();
protected:
protected:
// Figure out the dimensions for individual gemms.
IntV M_; /// numFilters_ / filter_group_;
IntV N_; /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_
......
......@@ -24,7 +24,7 @@ namespace paddle {
*/
class ConvBaseLayer : public Layer {
protected:
protected:
typedef std::vector<int> IntV;
/// True if it's deconv layer, false if it's convolution layer
......@@ -88,7 +88,7 @@ protected:
/// of output size.
bool caffeMode_;
public:
public:
explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......
......@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvBaseOperator : public Operator {
public:
public:
ConvBaseOperator(const OperatorConfig &config, bool useGpu);
/**
* Free workspace in device and destroy cudnn tensor descriptor.
......@@ -46,7 +46,7 @@ public:
hl_destroy_convolution_descriptor(convDesc_);
}
protected:
protected:
/**
* Get convolution parameters from layer config and
* initialize member variables.
......
......@@ -23,7 +23,7 @@ namespace paddle {
* @brief Base class for ConvProjection and ConvTransProjection.
*/
class ConvBaseProjection : public Projection {
public:
public:
/**
* Constructor.
*/
......@@ -33,7 +33,7 @@ public:
~ConvBaseProjection();
protected:
protected:
void getConvParams();
void initCudnn();
......
......@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvOperator : public ConvBaseOperator {
public:
public:
ConvOperator(const OperatorConfig &config, bool useGpu)
: ConvBaseOperator(config, useGpu) {}
/**
......
......@@ -23,7 +23,7 @@ namespace paddle {
 * @brief Convolution projection does the same calculation as CudnnConvLayer.
*/
class ConvProjection : public ConvBaseProjection {
public:
public:
/**
* Constructor.
*/
......
......@@ -42,7 +42,7 @@ namespace paddle {
*/
class ConvShiftLayer : public Layer {
public:
public:
explicit ConvShiftLayer(const LayerConfig& config) : Layer(config) {}
~ConvShiftLayer() {}
......
......@@ -29,7 +29,7 @@ namespace paddle {
*/
class ConvTransOperator : public ConvBaseOperator {
public:
public:
ConvTransOperator(const OperatorConfig &config, bool useGpu)
: ConvBaseOperator(config, useGpu) {}
/**
......
......@@ -23,7 +23,7 @@ namespace paddle {
 * @brief Convolution projection does the same calculation as CudnnConvLayer.
*/
class ConvTransProjection : public ConvBaseProjection {
public:
public:
/**
* Constructor.
*/
......
......@@ -36,7 +36,7 @@ namespace paddle {
* The config file api is linear_comb_layer.
*/
class ConvexCombinationLayer : public Layer {
protected:
protected:
/// A matrix pointer pointing to second input.
MatrixPtr tmpMtx0;
/// A matrix pointer pointing to first input.
......@@ -44,7 +44,7 @@ protected:
/// A matrix pointer pointing to output.
MatrixPtr tmpRow1;
public:
public:
explicit ConvexCombinationLayer(const LayerConfig& config) : Layer(config) {}
~ConvexCombinationLayer() {}
......
......@@ -33,7 +33,7 @@ namespace paddle {
* The config file api is cos_sim.
*/
class CosSimLayer : public Layer {
public:
public:
explicit CosSimLayer(const LayerConfig& config) : Layer(config) {}
~CosSimLayer() {}
......
......@@ -32,7 +32,7 @@ namespace paddle {
*/
class CosSimVecMatLayer : public Layer {
protected:
protected:
MatrixPtr tmpMtx0;
MatrixPtr tmpMtx1;
MatrixPtr tmpRow0;
......@@ -40,7 +40,7 @@ protected:
MatrixPtr tmpRow2;
MatrixPtr tmpRow3;
public:
public:
explicit CosSimVecMatLayer(const LayerConfig& config) : Layer(config) {}
~CosSimVecMatLayer() {}
......
......@@ -716,7 +716,7 @@ void HuberTwoClassification::backwardImp(Matrix& output,
* \f]
*/
class SumCostLayer : public Layer {
public:
public:
explicit SumCostLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......
......@@ -29,7 +29,7 @@ namespace paddle {
* handled by the base class.
*/
class CostLayer : public Layer {
public:
public:
explicit CostLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......@@ -51,7 +51,7 @@ public:
Argument& label,
Matrix& outputGrad) = 0;
protected:
protected:
LayerPtr weightLayer_;
real coeff_;
};
......@@ -65,7 +65,7 @@ protected:
* \f]
*/
class MultiClassCrossEntropy : public CostLayer {
public:
public:
explicit MultiClassCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
......@@ -95,7 +95,7 @@ public:
* In Proceedings of the ACL 2014 Conference.
*/
class MultiClassCrossEntropyWithSelfNorm : public CostLayer {
public:
public:
explicit MultiClassCrossEntropyWithSelfNorm(const LayerConfig& config)
: CostLayer(config) {}
......@@ -108,7 +108,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
protected:
MatrixPtr sftMaxSum_;
MatrixPtr sumInv_;
};
......@@ -120,7 +120,7 @@ protected:
* \f]
*/
class SoftBinaryClassCrossEntropy : public CostLayer {
public:
public:
explicit SoftBinaryClassCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
......@@ -133,7 +133,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
protected:
MatrixPtr targetPerDim_;
};
......@@ -145,7 +145,7 @@ protected:
* \f]
*/
class SumOfSquaresCostLayer : public CostLayer {
public:
public:
explicit SumOfSquaresCostLayer(const LayerConfig& config)
: CostLayer(config) {}
......@@ -171,7 +171,7 @@ public:
* x = output - label
*/
class SmoothL1CostLayer : public CostLayer {
public:
public:
explicit SmoothL1CostLayer(const LayerConfig& config) : CostLayer(config) {}
bool init(const LayerMap& layerMap,
......@@ -197,7 +197,7 @@ public:
 * Rank using Gradient Descent.
*/
class RankingCost : public Layer {
public:
public:
explicit RankingCost(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......@@ -225,7 +225,7 @@ public:
(void)outputGrad;
}
private:
private:
double posPairCount_;
double negPairCount_;
MatrixPtr margin_;
......@@ -250,7 +250,7 @@ private:
* with Nonsmooth Cost Functions.
*/
class LambdaCost : public Layer {
public:
public:
explicit LambdaCost(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
......@@ -270,7 +270,7 @@ public:
real* gradData,
int size);
private:
private:
MatrixPtr marginGrad_;
int truncationSize_;
int maxSortSize_;
......@@ -287,10 +287,10 @@ private:
* \f]
*/
class MultiBinaryLabelCrossEntropy : public CostLayer {
protected:
protected:
MatrixPtr targetPerDim_;
public:
public:
explicit MultiBinaryLabelCrossEntropy(const LayerConfig& config)
: CostLayer(config) {}
......@@ -308,7 +308,7 @@ public:
* A base layer for HuberRegressionLoss and HuberTwoClassification.
*/
class HuberCost : public CostLayer {
public:
public:
std::vector<Argument> tmpCpuInput_;
explicit HuberCost(const LayerConfig& config) : CostLayer(config) {}
......@@ -331,7 +331,7 @@ public:
* Loss = delta * abs(y - f) - 0.5 * delta^2, otherwise
*/
class HuberRegressionLoss : public HuberCost {
public:
public:
explicit HuberRegressionLoss(const LayerConfig& config) : HuberCost(config) {}
bool init(const LayerMap& layerMap,
......@@ -343,7 +343,7 @@ public:
Argument& label,
Matrix& outputGrad) override;
protected:
protected:
real delta_;
};
......@@ -356,7 +356,7 @@ protected:
* Loss = 0, otherwise
*/
class HuberTwoClassification : public HuberCost {
public:
public:
explicit HuberTwoClassification(const LayerConfig& config)
: HuberCost(config) {}
......
......@@ -28,7 +28,7 @@ namespace paddle {
* crop input as this shape conf
*/
class CropLayer : public Layer {
public:
public:
explicit CropLayer(const LayerConfig& config) : Layer(config) {}
~CropLayer() {}
......@@ -38,7 +38,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
void setOutDims();
void setInDims();
......
......@@ -44,7 +44,7 @@ struct BeamExpansion {
typedef std::shared_ptr<BeamExpansion> BeamExpansionPtr;
class CostForOneSequence {
public:
public:
CostForOneSequence()
: beamSize_(0), validExpansionCount_(0), goldAsExtraPath_(false) {}
void setData(const BeamExpansionPtr bPtr, size_t beamSize) {
......@@ -64,7 +64,7 @@ public:
real forward();
void backward();
private:
private:
void calValidExpandStep();
void constructTotalExpansion();
size_t initLastExpansion();
......@@ -93,14 +93,14 @@ private:
};
class CrossEntropyOverBeam : public Layer {
public:
public:
explicit CrossEntropyOverBeam(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
private:
private:
void checkInputs();
void copyInputsToCpu();
void resizeOutput();
......
......@@ -30,7 +30,7 @@ namespace paddle {
*/
class CudnnBatchNormLayer : public BatchNormBaseLayer {
public:
public:
explicit CudnnBatchNormLayer(const LayerConfig& config)
: BatchNormBaseLayer(config) {}
......@@ -46,7 +46,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
/// Epsilon value used in the batch normalization formula.
/// Same epsilon value should be used in forward and backward functions.
double eps_;
......
......@@ -31,14 +31,14 @@ namespace paddle {
* The config file api is img_conv_layer.
*/
class CudnnConvBaseLayer : public ConvBaseLayer {
protected:
protected:
std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
std::vector<std::unique_ptr<Projection>> projections_;
hl_tensor_descriptor biasDesc_;
hl_tensor_descriptor outputDesc_;
public:
public:
explicit CudnnConvBaseLayer(const LayerConfig& config)
: ConvBaseLayer(config) {}
......
......@@ -26,7 +26,7 @@ namespace paddle {
*/
class CudnnPoolLayer : public PoolLayer {
protected:
protected:
int windowHeight, windowWidth;
int heightPadding, widthPadding, strideHeight, strideWidth;
int imageH_, imageW_, outputH_, outputW_;
......@@ -40,7 +40,7 @@ protected:
/// A description of a pooling operation.
hl_pooling_descriptor poolingDesc_;
public:
public:
static bool typeCheck(const std::string& poolType,
hl_pooling_mode_t* mode = nullptr);
explicit CudnnPoolLayer(const LayerConfig& config);
......
......@@ -25,7 +25,7 @@ namespace paddle {
* The config file api is data_layer.
*/
class DataLayer : public Layer {
public:
public:
explicit DataLayer(const LayerConfig& config) : Layer(config) {}
virtual void setData(const Argument& data) { data_ = data; }
......@@ -58,10 +58,10 @@ public:
}
}
private:
private:
void copyDataToOutput(Argument& output);
protected:
protected:
Argument data_;
};
......
......@@ -37,7 +37,7 @@ namespace paddle {
*/
class DataNormLayer : public Layer {
public:
public:
enum NormalizationStrategy { kZScore = 0, kMinMax = 1, kDecimalScaling = 2 };
explicit DataNormLayer(const LayerConfig& config) : Layer(config) {}
......@@ -50,7 +50,7 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
protected:
protected:
int mode_;
std::unique_ptr<Weight> weight_;
MatrixPtr min_;
......
......@@ -27,7 +27,7 @@ namespace paddle {
* calculate deconvolution3D operation.
*/
class DeConv3DLayer : public ConvBaseLayer {
public:
public:
explicit DeConv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~DeConv3DLayer() {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
......@@ -40,7 +40,7 @@ public:
void bpropWeights(int i);
size_t getSize();
protected:
protected:
// Figure out the dimensions for individual gemms.
IntV M_; /// numFilters_ / filter_group_;
IntV N_; /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_
......
......@@ -33,7 +33,7 @@ namespace paddle {
*/
class DetectionOutputLayer : public Layer {
public:
public:
explicit DetectionOutputLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
......@@ -42,7 +42,7 @@ public:
void backward(const UpdateCallback& callback = nullptr) {}
protected:
protected:
inline LayerPtr getPriorBoxLayer() { return inputLayers_[0]; }
inline LayerPtr getLocInputLayer(size_t index) {
......@@ -53,7 +53,7 @@ protected:
return inputLayers_[1 + inputNum_ + index];
}
private:
private:
size_t numClasses_; // number of classes
size_t inputNum_; // number of input layers
real nmsThreshold_;
......
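Note on the access-specifier hunks above: each adjacent pair such as public: followed by public: is the removed line and the added line of the same statement, and the two differ only in leading whitespace, which is easy to overlook in this view. The added lines indent the access specifier by a single space inside the class body, the layout used by Google C++ style when members are indented two spaces. A minimal before/after sketch follows; ExampleLayerOld, ExampleLayerNew, and their members are hypothetical and exist only to make the whitespace change visible:

// Formatted the old way: access specifiers start in column 0.
class ExampleLayerOld {
public:
  explicit ExampleLayerOld(int size) : size_(size) {}
  int size() const { return size_; }

private:
  int size_;
};

// Formatted the new way: access specifiers are indented by one space,
// one column to the left of the two-space-indented members.
class ExampleLayerNew {
 public:
  explicit ExampleLayerNew(int size) : size_(size) {}
  int size() const { return size_; }

 private:
  int size_;
};

In practice a change of this kind is usually produced mechanically by re-running clang-format over the affected headers rather than by hand-editing each member line.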
(Diffs for the remaining files changed by this commit are collapsed and not shown.)