Commit 1ad8f821 authored by 朔-望

modify to 2 spaces indent & format code & rm build folder

Parent e35ef6fe
......@@ -2,5 +2,4 @@
Language: Cpp
BasedOnStyle: LLVM
Standard: Cpp11
IndentWidth: 4
...
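With the IndentWidth: 4 override deleted, clang-format falls back to the LLVM default of 2, which is exactly the 2-space indent named in the commit message. For illustration only, an equivalent explicit form of the new .clang-format would be (the IndentWidth line below is redundant):

    Language: Cpp
    BasedOnStyle: LLVM
    Standard: Cpp11
    IndentWidth: 2
    ...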
This diff is collapsed.
......@@ -56,7 +56,7 @@ struct Print {
return *this;
}
private:
void print(LogLevel level) {
buffer_ << std::endl;
if (level == kLOG_ERROR) {
......@@ -73,8 +73,7 @@ struct ToLog {
: level_(level) {
unsigned blanks =
(unsigned)(level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
printer_ << logs[level] << " " << info << ":"
<< std::string(blanks, ' ');
printer_ << logs[level] << " " << info << ":" << std::string(blanks, ' ');
}
template <typename T> ToLog &operator<<(T const &value) {
......@@ -84,7 +83,7 @@ struct ToLog {
~ToLog() { printer_.print(level_); }
private:
LogLevel level_;
Print printer_;
};
......@@ -93,10 +92,10 @@ struct ToLog {
if (level > paddle_mobile::log_level) { \
} else \
paddle_mobile::ToLog( \
level, (std::stringstream() \
level, \
(std::stringstream() \
<< "[file: " \
<< (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) \
: __FILE__) \
<< (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) : __FILE__) \
<< "] [line: " << __LINE__ << "] ") \
.str())
......@@ -107,8 +106,7 @@ struct ToLog {
paddle_mobile::kLOG_DEBUG, \
(std::stringstream() \
<< "[file: " \
<< (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) \
: __FILE__) \
<< (strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) : __FILE__) \
<< "] [line: " << __LINE__ << "] ") \
.str())
} // namespace paddle_mobile
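The macros above are used stream-style. A minimal usage sketch, assuming the first macro is the LOG(level) form and borrowing a DLOG call site that appears later in this commit:

    // Debug-level message (filtered out when log_level is below kLOG_DEBUG).
    DLOG << "op type: " << op_type << " index: " << j;
    // Message with an explicit level.
    LOG(paddle_mobile::kLOG_ERROR) << "failed to load model";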
......@@ -144,7 +142,7 @@ struct Print {
friend struct ToLog;
template <typename T> Print &operator<<(T const &value) {}
private:
};
struct ToLog {
......
......@@ -49,7 +49,7 @@ template <typename F> struct VariantHelper<F> {
};
template <size_t size> class RawData {
public:
char data[size];
RawData() {}
RawData(const RawData &raw_data) { strcpy(data, raw_data.data); }
......@@ -87,7 +87,7 @@ template <typename... Ts> struct Variant {
size_t TypeId() const { return type_id; }
private:
static inline size_t invalid_type() { return typeid(void).hash_code(); }
typedef VariantHelper<Ts...> helper;
size_t type_id;
......
......@@ -27,7 +27,7 @@ namespace framework {
class BlockDesc;
class Attribute {
public:
static Attribute GetAttrValue(const proto::OpDesc::Attr &attr_desc) {
// std::cout << "begin get attr value" << std::endl;
Attribute attr;
......@@ -100,7 +100,7 @@ class Attribute {
template <typename T> T &Get() const { return variant_.Get<T>(); }
private:
Variant<int, float, std::string, std::vector<int>, std::vector<float>,
std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
int64_t>
......@@ -110,7 +110,7 @@ class Attribute {
using AttributeMap = std::unordered_map<std::string, Attribute>;
class AttrReader {
public:
explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}
template <typename T> inline T Get(const std::string &name) const {
......@@ -121,7 +121,7 @@ class AttrReader {
return ((Attribute)attrs_.at(name)).Get<T>();
}
private:
const AttributeMap &attrs_;
};
......
......@@ -27,7 +27,7 @@ namespace paddle_mobile {
namespace framework {
class BlockDesc : PaddleMobileObject {
public:
BlockDesc(const proto::BlockDesc &desc);
const int &ID() const { return desc_.idx(); }
......@@ -35,8 +35,7 @@ class BlockDesc : PaddleMobileObject {
const int &Parent() const { return desc_.parent_idx(); }
bool operator==(const paddle_mobile::framework::BlockDesc &in_block) const {
return this->ID() == in_block.ID() &&
this->Parent() == in_block.Parent();
return this->ID() == in_block.ID() && this->Parent() == in_block.Parent();
}
bool operator<(const paddle_mobile::framework::BlockDesc &in_block) const {
......@@ -46,7 +45,7 @@ class BlockDesc : PaddleMobileObject {
std::vector<std::shared_ptr<VarDesc>> Vars() const;
std::vector<std::shared_ptr<OpDesc>> Ops() const;
private:
proto::BlockDesc desc_;
std::vector<std::shared_ptr<OpDesc>> ops_;
std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
......
......@@ -90,26 +90,24 @@ DDim make_ddim(const std::vector<int> &dims) {
// XXX For some reason, putting this in an anonymous namespace causes
// errors
struct DynamicMutableIndexer : Vistor<int64_t &> {
public:
explicit DynamicMutableIndexer(int idx) : idx_(idx) {}
template <int D> int64_t &operator()(Dim<D> &dim) const {
return dim[idx_];
}
template <int D> int64_t &operator()(Dim<D> &dim) const { return dim[idx_]; }
private:
int idx_;
};
struct DynamicConstIndexer : public Vistor<int64_t> {
public:
explicit DynamicConstIndexer(int idx) : idx_(idx) {}
template <int D> int64_t operator()(const Dim<D> &dim) const {
return dim[idx_];
}
private:
int idx_;
};
......@@ -288,7 +286,7 @@ struct OSVistor : Vistor<std::ostream &> {
return os_ << dim;
}
private:
std::ostream &os_;
};
......
......@@ -123,9 +123,7 @@ template <> struct DimGetter<0> {
return d.head;
}
// Return a reference if Dim is mutable
template <typename D> HOSTDEVICE static int64_t &impl(D &d) {
return d.head;
}
template <typename D> HOSTDEVICE static int64_t &impl(D &d) { return d.head; }
};
template <int D> HOSTDEVICE int64_t &indexer(Dim<D> &dim, int idx) {
......
......@@ -35,14 +35,14 @@ namespace paddle_mobile {
namespace framework {
template <typename Dtype> class Executor {
public:
Executor();
Executor(const Program<Dtype> p);
std::shared_ptr<Tensor> predict(Tensor &t);
public:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
......
This diff is collapsed.
This diff is collapsed.
......@@ -152,8 +152,7 @@ bool CheckLoD(const LoD &in, int tensor_height) {
// check: all the offsets in a level should be ascending(no same
// items
// allows).
if (!std::is_sorted(level.begin(), level.begin(),
[](size_t a, size_t b) {
if (!std::is_sorted(level.begin(), level.begin(), [](size_t a, size_t b) {
if (a < b)
return true;
return false;
......@@ -188,8 +187,7 @@ bool CheckAbsLoD(const LoD &in, int tensor_height) {
// check: all the offsets in a level should be ascending(no same
// items
// allows).
if (!std::is_sorted(level.begin(), level.begin(),
[](size_t a, size_t b) {
if (!std::is_sorted(level.begin(), level.begin(), [](size_t a, size_t b) {
if (a < b)
return true;
return false;
......
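Note that both before and after the reformat, the two calls above pass level.begin() twice to std::is_sorted, so the checked range is empty and the ascending test always passes. A corrected sketch (a hypothetical fix, not part of this commit):

    // Passing level.end() as the second iterator makes is_sorted actually
    // walk the offsets; the comparator is unchanged.
    if (!std::is_sorted(level.begin(), level.end(),
                        [](size_t a, size_t b) { return a < b; })) {
      // offsets in this level are not ascending
    }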
......@@ -102,7 +102,7 @@ bool CheckAbsLoD(const LoD &in, int tensor_height = -1);
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
class LoDTensor : public Tensor {
public:
LoDTensor() : Tensor() {}
explicit LoDTensor(const LoD &lod) : lod_(lod) {}
......@@ -139,7 +139,7 @@ class LoDTensor : public Tensor {
return (lod_)[level].size() - 1;
}
private:
LoD lod_;
};
......
......@@ -26,7 +26,7 @@ namespace paddle_mobile {
namespace framework {
class OpDesc : PaddleMobileObject {
public:
OpDesc(const proto::OpDesc &desc);
const std::vector<std::string> &Input(const std::string &name) const;
const std::vector<std::string> &Output(const std::string &name) const;
......@@ -40,7 +40,7 @@ class OpDesc : PaddleMobileObject {
const std::string &Type() { return desc_.type(); };
private:
proto::OpDesc desc_;
VariableNameMap inputs_;
VariableNameMap outputs_;
......
......@@ -39,7 +39,7 @@ template <typename Dtype> class OpInfoMap;
template <typename Dtype> static OpInfoMap<Dtype> *g_op_info_map = nullptr;
template <typename Dtype> class OpInfoMap {
public:
static OpInfoMap &Instance() {
if (g_op_info_map<Dtype> == nullptr) {
g_op_info_map<Dtype> = new OpInfoMap();
......@@ -83,7 +83,7 @@ template <typename Dtype> class OpInfoMap {
return &map_;
}
private:
OpInfoMap() = default;
std::unordered_map<std::string, OpInfo<Dtype>> map_;
......
......@@ -27,8 +27,7 @@ struct OpKernelType {
struct Hash {
size_t operator()(const OpKernelType &key) const {
int data_type = static_cast<int>(key.data_type_) << LEFT_SHIFT;
int data_layout = static_cast<int>(key.data_layout_)
<< (LEFT_SHIFT * 2);
int data_layout = static_cast<int>(key.data_layout_) << (LEFT_SHIFT * 2);
std::hash<int> hasher;
return hasher(data_type + data_layout);
......
......@@ -49,7 +49,7 @@ static std::unordered_map<
{"fetch", {{"X"}, {"Out"}}}};
template <typename Dtype> class OperatorBase : PaddleMobileObject {
public:
OperatorBase(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope);
......@@ -66,30 +66,30 @@ template <typename Dtype> class OperatorBase : PaddleMobileObject {
}
}
protected:
std::shared_ptr<Scope> scope_;
std::string type_;
VariableNameMap inputs_;
VariableNameMap outputs_;
AttributeMap attrs_;
private:
void CheckAllInputOutputSet() const;
};
template <typename Dtype>
class OperatorWithKernel : public OperatorBase<Dtype> {
public:
OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const AttributeMap &attrs, std::shared_ptr<Scope> scope)
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
virtual void InferShape() const = 0;
virtual void Run() const = 0;
};
template <typename Dtype, typename P> class OpKernelBase : PaddleMobileObject {
public:
virtual void Compute(const P &para) const = 0;
virtual ~OpKernelBase() = default;
......
......@@ -24,13 +24,13 @@ SOFTWARE.
namespace paddle_mobile {
class PaddleMobileObject {
public:
virtual std::string ToString() {
char address[128] = {0};
sprintf(address, "%p", this);
return std::string(address);
}
private:
};
} // namespace paddle_mobile
......@@ -29,7 +29,7 @@ namespace paddle_mobile {
namespace framework {
class Node : PaddleMobileObject {
public:
Node(const std::string &type) : type_(type) {}
Node(std::shared_ptr<OpDesc> op_desc)
: op_desc_(op_desc), type_(op_desc->Type()){};
......@@ -39,7 +39,7 @@ class Node : PaddleMobileObject {
Node &To(int index);
uint depth(uint begin = 0);
private:
std::shared_ptr<OpDesc> op_desc_;
std::string ToString(std::string blank, const Node *node) const;
std::vector<std::shared_ptr<Node>> outputs_;
......
......@@ -35,8 +35,7 @@ ProgramOptimize::FushionOptimize(std::shared_ptr<ProgramDesc> ori_des) {
auto op = block->Ops()[j];
auto op_type = op->Type();
// DLOG << "op type: " << op_type << " index: " << j;
if (op_input_output_key.find(op->Type()) ==
op_input_output_key.end()) {
if (op_input_output_key.find(op->Type()) == op_input_output_key.end()) {
return NULL;
}
......
......@@ -26,13 +26,13 @@ namespace paddle_mobile {
namespace framework {
class ProgramOptimize {
public:
ProgramOptimize() {}
std::shared_ptr<ProgramDesc> Optimize();
std::shared_ptr<ProgramDesc>
FushionOptimize(std::shared_ptr<ProgramDesc> ori_des);
private:
// std::shared_ptr<ProgramDesc> ori_desc_;
std::vector<std::unordered_map<std::string, std::shared_ptr<Node>>>
outputs_nodes_;
......
......@@ -28,12 +28,12 @@ namespace framework {
template <typename Dtype, Precision P = Precision::FP32>
class Program : PaddleMobileObject {
public:
std::shared_ptr<ProgramDesc> originProgram;
std::shared_ptr<ProgramDesc> optimizeProgram;
std::shared_ptr<Scope> scope;
private:
};
} // namespace framework
......
......@@ -28,12 +28,12 @@ namespace paddle_mobile {
namespace framework {
class ProgramDesc : PaddleMobileObject {
public:
ProgramDesc(const proto::ProgramDesc &desc);
std::shared_ptr<BlockDesc> Block(size_t idx);
const std::vector<std::shared_ptr<BlockDesc>> &Blocks() { return blocks_; };
private:
std::vector<std::shared_ptr<BlockDesc>> blocks_;
proto::ProgramDesc desc_;
};
......
......@@ -26,7 +26,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
class Scope {
public:
Scope() {}
~Scope() {}
......@@ -67,7 +67,7 @@ class Scope {
Variable *FindVarLocally(const std::string &name) const;
private:
// Call Scope::NewScope for a sub-scope.
explicit Scope(Scope const *parent) : parent_(parent) {}
......
......@@ -27,7 +27,7 @@ namespace paddle_mobile {
namespace framework {
class SelectedRows {
public:
SelectedRows(const std::vector<int64_t> &rows, const int64_t &height)
: rows_(rows), height_(height) {
value_.reset(new Tensor());
......@@ -67,7 +67,7 @@ class SelectedRows {
return make_ddim(dims);
}
private:
// Notice: rows can be duplicate. We can have {0, 4, 7, 0, 5, 7, 9}
// here.
// SelectedRows are simply concated when adding together. Until a
......
......@@ -56,8 +56,7 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> {
};
static inline size_t SizeOfType(std::type_index type) {
SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t>
functor;
SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor;
size_t size = functor(type);
// PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s",
// type.name());
......@@ -67,7 +66,7 @@ static inline size_t SizeOfType(std::type_index type) {
class LoDTensor;
class Tensor {
public:
Tensor() : offset_(0) {}
/*! Return a pointer to mutable memory block. */
......@@ -78,8 +77,8 @@ class Tensor {
// typeid(T).hash_code(),
// "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name());
return reinterpret_cast<T *>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
offset_);
}
/*! Return a pointer to constant memory block. */
......@@ -236,7 +235,7 @@ class Tensor {
inline void set_layout(const DataLayout layout) { layout_ = layout; }
private:
/**
* @note Placeholder hides type T, so it doesn't appear as a
* template
......
......@@ -189,8 +189,7 @@ void TensorFromStream(std::istream &is, framework::Tensor *tensor) {
{ // read tensor
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(desc.dims().begin(), desc.dims().end(),
std::back_inserter(dims));
std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
tensor->Resize(framework::make_ddim(dims));
void *buf;
......
......@@ -25,7 +25,7 @@ namespace paddle_mobile {
namespace framework {
class VarDesc {
public:
VarDesc(const proto::VarDesc &desc);
std::string Name() const { return desc_.name(); }
......@@ -80,7 +80,7 @@ class VarDesc {
return this->RepeatedToVector(tensor_desc().dims());
}
private:
proto::VarDesc desc_;
};
......
......@@ -28,7 +28,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
class Variable : public PaddleMobileObject {
public:
template <typename T> const T *Get() const {
return static_cast<const T *>(holder_->Ptr());
}
......@@ -67,7 +67,7 @@ class Variable : public PaddleMobileObject {
void SetName(const std::string *name) { name_ = name; }
private:
struct Placeholder {
Placeholder() = default;
virtual ~Placeholder() = default;
......
......@@ -174,10 +174,8 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
auto var = scope->Var(var_desc->Name());
if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
if (var_desc->Persistable() &&
var_desc->GetType() !=
framework::proto::VarType::FEED_MINIBATCH &&
var_desc->GetType() !=
framework::proto::VarType::FETCH_LIST) {
var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
framework::LoDTensor *tensor =
var->GetMutable<framework::LoDTensor>();
// to load
......@@ -268,8 +266,7 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
}
if (var.persistable() &&
var.type().type() !=
framework::proto::VarType::FEED_MINIBATCH &&
var.type().type() != framework::proto::VarType::FEED_MINIBATCH &&
var.type().type() != framework::proto::VarType::FETCH_LIST) {
// std::cout << " to load " << var.name() <<
// std::endl;
......@@ -289,8 +286,7 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
// 2 Lod information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level),
sizeof(lod_level));
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
// std::cout << " load level: " << lod_level <<
// std::endl;
// std::cout << " lod info: " << std::endl;
......
......@@ -29,10 +29,10 @@ namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class Loader : PaddleMobileObject {
public:
const framework::Program<Dtype, P> Load(const std::string &dirname);
private:
void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
};
......
......@@ -40,7 +40,7 @@ void Free(void *ptr);
template <typename T> class PODDeleter {
static_assert(std::is_pod<T>::value, "T must be POD");
public:
explicit PODDeleter(){};
void operator()(T *ptr) { Free(static_cast<void *>(ptr)); }
......@@ -55,7 +55,7 @@ template <typename T> class PODDeleter {
* reinterpret_cast
*/
template <typename T> class PlainDeleter {
public:
explicit PlainDeleter(){};
void operator()(T *ptr) { Free(reinterpret_cast<void *>(ptr)); }
......
......@@ -27,13 +27,13 @@ using namespace framework;
template <typename DeviceType, typename T>
class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
public:
BatchNormOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
......@@ -44,7 +44,7 @@ class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
BatchNormParam param_;
};
......
......@@ -26,13 +26,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
public:
ConcatOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
......@@ -43,7 +42,7 @@ class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
ConcatParam param_;
};
......
......@@ -60,9 +60,9 @@ void ConvOp<Dtype, T>::InferShape() const {
std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back(ConvOutputSize(in_dims[i + 2],
filter_dims[i + 2], dilations[i],
paddings[i], strides[i]));
output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
dilations[i], paddings[i],
strides[i]));
}
framework::DDim ddim = framework::make_ddim(output_shape);
......
......@@ -28,12 +28,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public:
ConvOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
......@@ -45,7 +45,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
this->ClearVariables({"Filter", "Input"});
}
private:
ConvParam param_;
};
......
......@@ -27,13 +27,13 @@ using namespace framework;
template <typename DeviceType, typename T>
class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
public:
ElementwiseAddOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
......@@ -44,7 +44,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
ElementwiseAddParam param_;
};
} // namespace operators
......
......@@ -67,15 +67,14 @@ void BatchNormKernel<CPU, float>::Compute(const BatchNormParam &param) const {
/// (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
for (int i = 0; i < C; i++) {
new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i];
new_bias_ptr[i] =
bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
{
for (int n = 0; n < N; n++) {
for (int h = 0; h < H; h++) {
for (int w = 0; w < W; w++) {
int index = n * stride0 + i * stride1 + h * stride2 + w;
out_ptr[index] = input_x_ptr[index] * new_scale_ptr[i] +
new_bias_ptr[i];
out_ptr[index] =
input_x_ptr[index] * new_scale_ptr[i] + new_bias_ptr[i];
}
}
}
......
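The loop above folds the usual batch-norm transform into a single per-channel multiply-add. With inv_std = 1 / sqrt(variance + epsilon), the identity being applied is:

    y = scale * (x - mean) * inv_std + bias
      = x * (inv_std * scale) + (bias - mean * inv_std * scale)
      = x * new_scale + new_bias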
......@@ -19,7 +19,7 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
template <typename T> class ConcatFunctor {
public:
void operator()(const std::vector<framework::Tensor> &input, const int axis,
framework::Tensor *output) {
size_t num = input.size();
......@@ -80,8 +80,7 @@ void StridedNumelCopyWithAxis(int64_t axis, T *dst,
}
for (int64_t i = 0; i < before; ++i) {
memory::Copy(dst + i * dst_after, src + i * src_after,
sizeof(T) * size);
memory::Copy(dst + i * dst_after, src + i * src_after, sizeof(T) * size);
}
}
......@@ -98,9 +97,9 @@ void ConcatKernel<CPU, float>::Compute(const ConcatParam &param) const {
for (auto *in : inputs) {
auto in_stride = framework::stride_numel(in->dims());
auto out_stride = framework::stride_numel(out->dims());
StridedNumelCopyWithAxis<float>(
axis, out->data<float>() + output_offset, out_stride,
in->data<float>(), in_stride, in_stride[axis]);
StridedNumelCopyWithAxis<float>(axis, out->data<float>() + output_offset,
out_stride, in->data<float>(), in_stride,
in_stride[axis]);
output_offset += in_stride[axis];
}
} else {
......
......@@ -138,12 +138,10 @@ template <> void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
}
// gemm
Tensor out_slice =
out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice =
filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul<float>(filter_slice, false, col_matrix, false,
float(1.0), &out_slice, float(0.0));
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul<float>(filter_slice, false, col_matrix, false, float(1.0),
&out_slice, float(0.0));
}
}
}
......
......@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class BatchNormKernel
: public framework::OpKernelBase<DeviceType, BatchNormParam> {
public:
void Compute(const BatchNormParam &param) const;
};
......
......@@ -26,7 +26,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConcatKernel : public framework::OpKernelBase<DeviceType, ConcatParam> {
public:
void Compute(const ConcatParam &param) const;
};
......
......@@ -31,7 +31,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConvKernel : public framework::OpKernelBase<DeviceType, ConvParam> {
public:
void Compute(const ConvParam &param) const;
};
} // namespace operators
......
......@@ -29,7 +29,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class ElementwiseAddKernel
: public framework::OpKernelBase<DeviceType, ElementwiseAddParam> {
public:
void Compute(const ElementwiseAddParam &param) const;
};
} // namespace operators
......
......@@ -26,8 +26,8 @@ namespace operators {
using namespace framework;
template <typename T> struct LRNFunctor {
void operator()(const framework::Tensor &input, framework::Tensor *out,
int N, int C, int H, int W, int n, T k, T alpha, T beta) {
void operator()(const framework::Tensor &input, framework::Tensor *out, int N,
int C, int H, int W, int n, T k, T alpha, T beta) {
auto input_ptr = input.data<T>();
const int start = -(n - 1) / 2;
const int end = start + n;
......@@ -47,14 +47,11 @@ template <typename T> struct LRNFunctor {
if (channel >= 0 && channel < C) {
for (int c = 0; c < H; c++) {
for (int d = 0; d < W; d++) {
int u =
a * stride0 + b * stride1 + c * stride2 + d;
int u = a * stride0 + b * stride1 + c * stride2 + d;
int i = a * stride0 + channel * stride1 +
c * stride2 + d;
int i = a * stride0 + channel * stride1 + c * stride2 + d;
sqr_buffer_ptr[u] +=
alpha * input_ptr[i] * input_ptr[i];
sqr_buffer_ptr[u] += alpha * input_ptr[i] * input_ptr[i];
}
}
}
......@@ -70,7 +67,7 @@ template <typename T> struct LRNFunctor {
template <typename DeviceType, typename T>
class LrnKernel : public framework::OpKernelBase<DeviceType, LrnParam> {
public:
void Compute(const LrnParam &param) const;
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> {
public:
void Compute(const MulParam &param) const;
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ using namespace framework;
template <typename DeviceType, typename T>
class PoolKernel : public framework::OpKernelBase<DeviceType, PoolParam> {
public:
void Compute(const PoolParam &param) const;
};
} // namespace operators
......
......@@ -27,12 +27,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class LrnOp : public framework::OperatorWithKernel<DeviceType> {
public:
LrnOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
......@@ -43,7 +43,7 @@ class LrnOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
LrnParam param_;
};
......
......@@ -69,7 +69,7 @@ inline void trim_trailing_singular_dims(framework::DDim *dims) {
}
template <typename T> class RowwiseTransformIterator {
public:
RowwiseTransformIterator(const T *ptr, int n) : ptr_(ptr), i_(0), n_(n) {}
RowwiseTransformIterator<T> &operator++() {
......@@ -90,7 +90,7 @@ template <typename T> class RowwiseTransformIterator {
const T &operator*() { return ptr_[i_]; }
private:
const T *ptr_;
int i_;
int64_t n_;
......@@ -101,7 +101,7 @@ template <typename T> class RowwiseTransformIterator {
/// in (4,20,2) is 2 ,
/// (20,1) move 1 stride , to fill(add) 2 element with the same number.
template <typename T> class MidWiseTransformIterator {
public:
MidWiseTransformIterator(const T *ptr, int n, int post)
: ptr_(ptr), i_(0), j_(0), n_(n), post_(post) {}
......@@ -127,7 +127,7 @@ template <typename T> class MidWiseTransformIterator {
const T &operator*() { return ptr_[i_]; }
private:
const T *ptr_;
int64_t i_;
int64_t j_;
......@@ -137,7 +137,7 @@ template <typename T> class MidWiseTransformIterator {
template <typename Functor, typename T, typename OutType = T>
class TransformFunctor {
public:
TransformFunctor(const framework::Tensor *x, const framework::Tensor *y,
framework::Tensor *z, Functor func)
: x_(x->data<T>()), y_(y->data<T>()), z_(z->mutable_data<OutType>()),
......@@ -156,11 +156,10 @@ class TransformFunctor {
inline void RunMidWise(int n, int pre, int post) const {
math::Transform trans;
trans(x_, x_ + nx_, MidWiseTransformIterator<T>(y_, n, post), z_,
func_);
trans(x_, x_ + nx_, MidWiseTransformIterator<T>(y_, n, post), z_, func_);
}
private:
const T *x_;
const T *y_;
OutType *z_;
......
......@@ -26,9 +26,8 @@ namespace math {
* output_width]
*/
template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor &im,
const std::vector<int> &dilation,
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
......@@ -72,17 +71,13 @@ template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
int h_offset = (c / filter_width) % filter_height;
int c_im = c / (filter_width * filter_height);
for (int h = 0; h < col_height; ++h) {
int im_row_idx =
h * stride[0] - padding[0] + h_offset * dilation[0];
int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
for (int w = 0; w < col_width; ++w) {
int im_col_idx =
w * stride[1] - padding[1] + w_offset * dilation[1];
int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
int col_idx = (c * col_height + h) * col_width + w;
int im_idx =
(im_row_idx + c_im * im_height) * im_width + im_col_idx;
int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx;
col_data[col_idx] =
(im_row_idx < 0 || im_row_idx >= im_height ||
col_data[col_idx] = (im_row_idx < 0 || im_row_idx >= im_height ||
im_col_idx < 0 || im_col_idx >= im_width)
? static_cast<T>(0)
: im_data[im_idx];
......@@ -99,7 +94,7 @@ template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
* output_width]
*/
template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
......@@ -145,15 +140,12 @@ template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
int h_offset = (c / filter_width) % filter_height;
int c_im = c / (filter_width * filter_height);
for (int h = 0; h < col_height; ++h) {
int im_row_idx =
h * stride[0] - padding[0] + h_offset * dilation[0];
int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
for (int w = 0; w < col_width; ++w) {
int im_col_idx =
w * stride[1] - padding[1] + w_offset * dilation[1];
int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
if ((im_row_idx) >= 0 && (im_row_idx) < im_height &&
(im_col_idx) >= 0 && (im_col_idx) < im_width) {
im_data[(im_row_idx + c_im * im_height) * im_width +
im_col_idx] +=
im_data[(im_row_idx + c_im * im_height) * im_width + im_col_idx] +=
col_data[(c * col_height + h) * col_width + w];
}
}
......@@ -174,9 +166,8 @@ template class Col2ImFunctor<ColFormat::kCFO, CPU, double>;
* filter_width]
*/
template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor &im,
const std::vector<int> &dilation,
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
......@@ -210,29 +201,25 @@ template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
for (int channel = 0; channel < im_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
int im_row_offset = col_row_idx * stride[0] +
filter_row_idx - padding[0];
for (int filter_col_idx = 0;
filter_col_idx < filter_width; ++filter_col_idx) {
int im_col_offset = col_col_idx * stride[1] +
filter_col_idx - padding[1];
int im_row_offset =
col_row_idx * stride[0] + filter_row_idx - padding[0];
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_col_offset =
col_col_idx * stride[1] + filter_col_idx - padding[1];
int col_offset =
((((col_row_idx)*col_width + col_col_idx) *
im_channels +
((((col_row_idx)*col_width + col_col_idx) * im_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
int im_offset =
(channel * im_height + im_row_offset) *
im_width +
int im_offset = (channel * im_height + im_row_offset) * im_width +
im_col_offset;
col_data[col_offset] =
(im_row_offset < 0 ||
im_row_offset >= im_height ||
(im_row_offset < 0 || im_row_offset >= im_height ||
im_col_offset < 0 || im_col_offset >= im_width)
? static_cast<T>(0)
: im_data[im_offset];
......@@ -251,7 +238,7 @@ template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
* filter_width]
*/
template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
......@@ -287,29 +274,25 @@ template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
for (int channel = 0; channel < im_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
int im_row_offset = col_row_idx * stride[0] +
filter_row_idx - padding[0];
for (int filter_col_idx = 0;
filter_col_idx < filter_width; ++filter_col_idx) {
int im_col_offset = col_col_idx * stride[1] +
filter_col_idx - padding[1];
int im_row_offset =
col_row_idx * stride[0] + filter_row_idx - padding[0];
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_col_offset =
col_col_idx * stride[1] + filter_col_idx - padding[1];
int col_offset =
(((col_row_idx * col_width + col_col_idx) *
im_channels +
(((col_row_idx * col_width + col_col_idx) * im_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
if (im_row_offset >= 0 &&
im_row_offset < im_height &&
im_col_offset >= 0 &&
im_col_offset < im_width) {
if (im_row_offset >= 0 && im_row_offset < im_height &&
im_col_offset >= 0 && im_col_offset < im_width) {
int im_offset =
(channel * im_height + im_row_offset) *
im_width +
(channel * im_height + im_row_offset) * im_width +
im_col_offset;
im_data[im_offset] += col_data[col_offset];
}
......
......@@ -89,16 +89,15 @@ enum class ColFormat { kCFO = 0, kOCF = 1 };
*/
template <ColFormat Format, typename DeviceType, typename T>
class Im2ColFunctor {
public:
void operator()(const framework::Tensor &im,
const std::vector<int> &dilation,
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col);
};
template <ColFormat Format, typename DeviceType, typename T>
class Col2ImFunctor {
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
......
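A minimal usage sketch for the functors declared above (the namespace path and the CPU tag are taken from the definitions earlier in this diff; the tensors and parameter values are illustrative):

    using paddle_mobile::operators::math::ColFormat;
    using paddle_mobile::operators::math::Im2ColFunctor;

    // im: 3-D input image tensor; col: pre-sized output buffer.
    Im2ColFunctor<ColFormat::kCFO, paddle_mobile::CPU, float> im2col;
    std::vector<int> dilation{1, 1}, stride{1, 1}, padding{0, 0};
    im2col(im, dilation, stride, padding, &col);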
......@@ -30,9 +30,8 @@ namespace math {
*/
template <typename PoolProcess, typename T>
class PoolFunctor<CPU, PoolProcess, T> {
public:
void operator()(const framework::Tensor &input,
const std::vector<int> &ksize,
void operator()(const framework::Tensor &input, const std::vector<int> &ksize,
const std::vector<int> &strides,
const std::vector<int> &paddings, PoolProcess pool_process,
framework::Tensor *output) {
......@@ -77,8 +76,7 @@ class PoolFunctor<CPU, PoolProcess, T> {
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(
input_data[h * input_width + w], &ele);
pool_process.compute(input_data[h * input_width + w], &ele);
}
}
int pool_size = (hend - hstart) * (wend - wstart);
......
......@@ -38,7 +38,7 @@ namespace math {
* MaxPoolGrad and AvgPoolGrad are gradient operations respectively.
*/
template <class T> class MaxPool {
public:
inline T initial() { return static_cast<T>(-FLT_MAX); }
inline void compute(const T &x, T *y) { *y = *y > x ? *y : x; }
......@@ -47,7 +47,7 @@ template <class T> class MaxPool {
};
template <class T> class AvgPool {
public:
inline T initial() { return static_cast<T>(0); }
inline void compute(const T &x, T *y) { *y += x; }
......@@ -57,9 +57,8 @@ template <class T> class AvgPool {
template <typename DeviceType, typename PoolProcess, typename T>
class PoolFunctor {
public:
void operator()(const framework::Tensor &input,
const std::vector<int> &ksize,
void operator()(const framework::Tensor &input, const std::vector<int> &ksize,
const std::vector<int> &strides,
const std::vector<int> &paddings, PoolProcess pool_compute,
framework::Tensor *output);
......
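A small sketch of how the pooling process objects above behave; the CPU PoolFunctor calls initial() once per window and compute() per element, then divides by the window size for average pooling (values illustrative):

    paddle_mobile::operators::math::MaxPool<float> max_pool;
    float acc = max_pool.initial();  // -FLT_MAX
    max_pool.compute(1.5f, &acc);    // acc = 1.5f
    max_pool.compute(0.5f, &acc);    // acc stays 1.5f

    paddle_mobile::operators::math::AvgPool<float> avg_pool;
    float sum = avg_pool.initial();  // 0
    avg_pool.compute(2.0f, &sum);    // sum = 2.0f; caller divides by pool size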
......@@ -26,7 +26,7 @@ using Tensor = paddle_mobile::framework::Tensor;
* output_depth, output_height, output_width]
*/
template <typename T> class Vol2ColFunctor<CPU, T> {
public:
void operator()(const Tensor &vol, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *col) const {
......@@ -81,28 +81,21 @@ template <typename T> class Vol2ColFunctor<CPU, T> {
int d_offset = (c / filter_width / filter_height) % filter_depth;
int c_in = c / filter_width / filter_height / filter_depth;
for (int d = 0; d < output_depth; ++d) {
int d_pad =
d * strides[0] - paddings[0] + d_offset * dilations[0];
int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
for (int h = 0; h < output_height; ++h) {
int h_pad =
h * strides[1] - paddings[1] + h_offset * dilations[1];
int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
for (int w = 0; w < output_width; ++w) {
int w_pad = w * strides[2] - paddings[2] +
w_offset * dilations[2];
int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
int col_idx =
((c * output_depth + d) * output_height + h) *
output_width +
w;
((c * output_depth + d) * output_height + h) * output_width + w;
int vol_idx =
((c_in * input_depth + d_pad) * input_height +
h_pad) *
((c_in * input_depth + d_pad) * input_height + h_pad) *
input_width +
w_pad;
col_data[col_idx] =
(h_pad < 0 || h_pad >= input_height || w_pad < 0 ||
w_pad >= input_width || d_pad < 0 ||
d_pad >= input_depth)
w_pad >= input_width || d_pad < 0 || d_pad >= input_depth)
? static_cast<T>(0)
: vol_data[vol_idx];
}
......@@ -119,7 +112,7 @@ template <typename T> class Vol2ColFunctor<CPU, T> {
* output_depth, output_height, output_width]
*/
template <typename T> class Col2VolFunctor<CPU, T> {
public:
void operator()(const Tensor &col, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *vol) const {
......@@ -173,27 +166,21 @@ template <typename T> class Col2VolFunctor<CPU, T> {
int d_offset = (c / filter_width / filter_height) % filter_depth;
int cIm = c / filter_width / filter_height / filter_depth;
for (int d = 0; d < output_depth; ++d) {
int d_pad =
d * strides[0] - paddings[0] + d_offset * dilations[0];
int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
for (int h = 0; h < output_height; ++h) {
int h_pad =
h * strides[1] - paddings[1] + h_offset * dilations[1];
int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
for (int w = 0; w < output_width; ++w) {
int w_pad = w * strides[2] - paddings[2] +
w_offset * dilations[2];
int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
if (h_pad >= 0 && h_pad < input_height && w_pad >= 0 &&
w_pad < input_width && d_pad >= 0 &&
d_pad < input_depth) {
w_pad < input_width && d_pad >= 0 && d_pad < input_depth) {
int vol_idx =
((cIm * input_depth + d_pad) * input_height +
h_pad) *
((cIm * input_depth + d_pad) * input_height + h_pad) *
input_width +
w_pad;
int col_idx =
((c * output_depth + d) * output_height + h) *
output_width +
((c * output_depth + d) * output_height + h) * output_width +
w;
vol_data[vol_idx] += col_data[col_idx];
}
......
......@@ -73,14 +73,14 @@ namespace math {
using Tensor = paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T> class Vol2ColFunctor {
public:
void operator()(const Tensor &vol, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *col) const;
};
template <typename DeviceType, typename T> class Col2VolFunctor {
public:
void operator()(const Tensor &col, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *vol) const;
......
......@@ -27,12 +27,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> {
public:
MulOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
......@@ -43,7 +43,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
MulParam param_;
};
......
......@@ -24,8 +24,7 @@ Print &operator<<(Print &printer, const ConvParam &conv_param) {
printer << "parameter of conv: "
<< "\n";
printer << " stride: "
<< " (" << conv_param.Strides()[0] << conv_param.Strides()[1]
<< ") "
<< " (" << conv_param.Strides()[0] << conv_param.Strides()[1] << ") "
<< "\n";
printer << " paddings: "
<< " (" << conv_param.Paddings()[0] << conv_param.Paddings()[1]
......
......@@ -31,8 +31,8 @@ namespace operators {
using namespace framework;
class OpParam : PaddleMobileObject {
public:
protected:
template <typename T>
static T *InputFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Input", inputs, scope);
......@@ -62,8 +62,7 @@ class OpParam : PaddleMobileObject {
return GetVarValue<T>("Mean", inputs, scope);
}
template <typename T>
static T *InputScaleFrom(const VariableNameMap &inputs,
const Scope &scope) {
static T *InputScaleFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Scale", inputs, scope);
}
......@@ -104,8 +103,8 @@ class OpParam : PaddleMobileObject {
}
template <typename T>
static T *GetVarValue(const std::string &key,
const VariableNameMap &var_map, const Scope &scope) {
static T *GetVarValue(const std::string &key, const VariableNameMap &var_map,
const Scope &scope) {
auto var_vec = var_map.at(key);
if (!var_vec.empty()) {
// std::cout << " get var value -- " << var_vec[0] <<
......@@ -133,7 +132,7 @@ class OpParam : PaddleMobileObject {
};
class ConvParam : OpParam {
public:
ConvParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -160,7 +159,7 @@ class ConvParam : OpParam {
const int &Groups() const { return groups; }
private:
Tensor *input_;
Tensor *output_;
LoDTensor *filter_;
......@@ -173,7 +172,7 @@ class ConvParam : OpParam {
Print &operator<<(Print &printer, const ConvParam &conv_param);
class ElementwiseAddParam : OpParam {
public:
ElementwiseAddParam(const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
......@@ -192,7 +191,7 @@ class ElementwiseAddParam : OpParam {
const int &Axis() const { return axis_; }
private:
Tensor *input_x_;
Tensor *input_y_;
Tensor *out_;
......@@ -200,7 +199,7 @@ class ElementwiseAddParam : OpParam {
};
class MulParam : OpParam {
public:
MulParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -221,7 +220,7 @@ class MulParam : OpParam {
const int &YNumColDims() const { return y_num_col_dims_; }
private:
Tensor *input_x_;
Tensor *input_y_;
Tensor *out_;
......@@ -230,7 +229,7 @@ class MulParam : OpParam {
};
class ConcatParam : public OpParam {
public:
ConcatParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -245,14 +244,14 @@ class ConcatParam : public OpParam {
const int &Axis() const { return axis_; }
private:
std::vector<Tensor *> inputs_;
Tensor *out_;
int axis_;
};
class LrnParam : public OpParam {
public:
LrnParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -282,7 +281,7 @@ class LrnParam : public OpParam {
const std::string &DataFormat() const { return data_format_; }
private:
Tensor *input_x_;
Tensor *out_;
Tensor *mid_out_;
......@@ -293,9 +292,8 @@ class LrnParam : public OpParam {
std::string data_format_;
};
class BatchNormParam : OpParam {
public:
BatchNormParam(const VariableNameMap &inputs,
const VariableNameMap &outputs,
BatchNormParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
......@@ -329,7 +327,7 @@ class BatchNormParam : OpParam {
const std::string &DataFormat() const { return data_format_; }
private:
Tensor *input_x_;
Tensor *output_y_;
Tensor *input_bias_;
......@@ -342,7 +340,7 @@ class BatchNormParam : OpParam {
std::string data_format_;
};
class PoolParam : public OpParam {
public:
PoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -373,7 +371,7 @@ class PoolParam : public OpParam {
bool isGlobalPooling() const { return gloabal_pooling_; }
private:
Tensor *input_;
Tensor *output_;
std::string pooling_type_;
......
......@@ -49,8 +49,8 @@ void PoolOp<DeviceType, T>::InferShape() const {
}
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) {
output_shape.push_back(PoolOutputSize(
in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode));
output_shape.push_back(PoolOutputSize(in_x_dims[i + 2], ksize[i],
paddings[i], strides[i], ceil_mode));
}
param_.Output()->Resize(framework::make_ddim(output_shape));
DLOG << "infer shape out size =" << param_.Output()->numel();
......
......@@ -28,12 +28,12 @@ using namespace framework;
template <typename DeviceType, typename T>
class PoolOp : public framework::OperatorWithKernel<DeviceType> {
public:
PoolOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
......@@ -45,7 +45,7 @@ class PoolOp : public framework::OperatorWithKernel<DeviceType> {
this->ClearVariables({"X"});
}
private:
PoolParam param_;
};
} // namespace operators
......
......@@ -17,7 +17,7 @@ limitations under the License. */
// Disable the copy and assignment operator for a class.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname &) = delete; \
classname(classname &&) = delete; \
classname &operator=(const classname &) = delete; \
......
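A usage sketch for the macro above (the class name is illustrative); the macro opens a private: section, so a class applying it should restore the access level it needs afterwards:

    class NonCopyable {
      DISABLE_COPY_AND_ASSIGN(NonCopyable);

     public:
      NonCopyable() = default;
    };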
......@@ -38,8 +38,8 @@ Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == op_type) {
std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
op->Type(), op->GetInputs(), op->GetOutputs(),
op->GetAttrMap(), this->program_.scope);
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
this->program_.scope);
this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
break;
......
......@@ -27,7 +27,7 @@ using namespace paddle_mobile::framework;
template <typename DeviceType, typename OpType>
class Executor4Test : public Executor<DeviceType> {
public:
Executor4Test(const Program<DeviceType> p, std::string op_type);
std::shared_ptr<Tensor> predict(Tensor &t, std::string input,
......
......@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestBatchNormOp {
public:
explicit TestBatchNormOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -52,8 +52,7 @@ template <typename Dtype> class TestBatchNormOp {
DLOG << " Input Scale is : " << op->Input("Scale")[0];
DLOG << " Input Bias is : " << op->Input("Bias")[0];
DLOG << " Output Y is : " << op->Output("Y")[0];
DLOG << " epsilon : "
<< op->GetAttrMap().at("epsilon").Get<float>();
DLOG << " epsilon : " << op->GetAttrMap().at("epsilon").Get<float>();
std::shared_ptr<operators::BatchNormOp<Dtype, float>> lrn =
std::make_shared<operators::BatchNormOp<Dtype, float>>(
op->Type(), op->GetInputs(), op->GetOutputs(),
......@@ -101,7 +100,7 @@ template <typename Dtype> class TestBatchNormOp {
return out_tensor;
}
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -113,8 +112,7 @@ template <typename Dtype> class TestBatchNormOp {
const Tensor &t4, const Tensor &t5, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size();
++j) {
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
DLOG << "op -> run()";
op->Run();
......@@ -140,8 +138,7 @@ int main() {
auto *inputx1_ptr = inputx1.data<float>();
paddle_mobile::framework::Tensor mean;
SetupTensor<float>(&mean, {10}, static_cast<float>(0),
static_cast<float>(1));
SetupTensor<float>(&mean, {10}, static_cast<float>(0), static_cast<float>(1));
auto *mean_ptr = mean.data<float>();
paddle_mobile::framework::Tensor scale;
......@@ -155,12 +152,11 @@ int main() {
auto *variance_ptr = variance.data<float>();
paddle_mobile::framework::Tensor bias;
SetupTensor<float>(&bias, {10}, static_cast<float>(0),
static_cast<float>(1));
SetupTensor<float>(&bias, {10}, static_cast<float>(0), static_cast<float>(1));
auto *bias_ptr = bias.data<float>();
paddle_mobile::framework::TestBatchNormOp<paddle_mobile::CPU>
testBatchNormOp(program);
paddle_mobile::framework::TestBatchNormOp<paddle_mobile::CPU> testBatchNormOp(
program);
auto output_bn =
testBatchNormOp.predict_bn(inputx1, mean, scale, variance, bias);
......
......@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestConcatOp {
public:
explicit TestConcatOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -41,15 +41,13 @@ template <typename Dtype> class TestConcatOp {
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "concat" &&
op->Input("X")[0] == "conv2d_3.tmp_1") {
if (op->Type() == "concat" && op->Input("X")[0] == "conv2d_3.tmp_1") {
DLOG << " mul attr size: " << op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: " << op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
DLOG << " axis : "
<< op->GetAttrMap().at("axis").Get<int>();
DLOG << " axis : " << op->GetAttrMap().at("axis").Get<int>();
std::shared_ptr<operators::ConcatOp<Dtype, float>> concat =
std::make_shared<operators::ConcatOp<Dtype, float>>(
......@@ -94,7 +92,7 @@ template <typename Dtype> class TestConcatOp {
return out_tensor;
}
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -106,8 +104,7 @@ template <typename Dtype> class TestConcatOp {
const Tensor &t4, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size();
++j) {
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
DLOG << "op -> run()";
op->Run();
......@@ -168,8 +165,7 @@ int main() {
/// output (4,100,2,2)
int input_index =
input_n * stride0 + input_c * stride1 + input_h * stride2 + input_w;
int output_index =
input_n * 100 * 2 * 2 +
int output_index = input_n * 100 * 2 * 2 +
(input_c + inputx1.dims()[1] + inputx2.dims()[1]) * 2 * 2 +
input_h * 2 + input_w;
......
This diff is collapsed.
......@@ -24,7 +24,7 @@ namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestMulOp {
public:
explicit TestMulOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -41,8 +41,7 @@ template <typename Dtype> class TestMulOp {
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "mul" &&
op->Input("X")[0] == "pool2d_0.tmp_0") {
if (op->Type() == "mul" && op->Input("X")[0] == "pool2d_0.tmp_0") {
DLOG << " mul attr size: " << op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: " << op->GetOutputs().size();
......@@ -88,7 +87,7 @@ template <typename Dtype> class TestMulOp {
return out_tensor;
}
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -99,8 +98,7 @@ template <typename Dtype> class TestMulOp {
void predict_mul(const Tensor &t1, const Tensor &t2, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size();
++j) {
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
DLOG << "op -> run()";
op->Run();
......
This diff is collapsed.