Commit d2f8befa authored by liuruilong

format files

Parent c9ffe855
@@ -17,15 +17,14 @@ SOFTWARE.
==============================================================================*/
#pragma once
#include "framework/attribute.h"
#include <map>
#include <string>
#include "framework/attribute.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class OperatorBase;
template <typename Dtype> class OperatorBase;
class OpDesc;
class BlockDesc;
class InferShapeContext;
@@ -34,20 +34,20 @@ class InferShapeContext;
using VariableNameMap = std::map<std::string, std::vector<std::string>>;
template <typename Dtype>
using OpCreator = std::function<framework::OperatorBase<Dtype>*(
const std::string& /*type*/, const VariableNameMap& /*inputs*/,
const VariableNameMap& /*outputs*/,
const framework::AttributeMap& /*attrs*/)>;
using OpCreator = std::function<framework::OperatorBase<Dtype> *(
const std::string & /*type*/, const VariableNameMap & /*inputs*/,
const VariableNameMap & /*outputs*/,
const framework::AttributeMap & /*attrs*/)>;
using GradOpMakerFN =
std::function<std::vector<std::unique_ptr<framework::OpDesc>>(
const framework::OpDesc&,
const std::unordered_set<std::string>& /*no_grad_set*/,
std::unordered_map<std::string, std::string>* /*grad_to_var*/,
const std::vector<framework::BlockDesc*>& grad_block)>;
const framework::OpDesc &,
const std::unordered_set<std::string> & /*no_grad_set*/,
std::unordered_map<std::string, std::string> * /*grad_to_var*/,
const std::vector<framework::BlockDesc *> &grad_block)>;
using InferVarTypeFN = std::function<void(const framework::OpDesc& /*op_desc*/,
framework::BlockDesc* /*block*/)>;
using InferVarTypeFN = std::function<void(const framework::OpDesc & /*op_desc*/,
framework::BlockDesc * /*block*/)>;
using InferShapeFN = std::function<void(framework::InferShapeContext*)>;
using InferShapeFN = std::function<void(framework::InferShapeContext *)>;
};
@@ -24,8 +24,7 @@ enum class Precision : int { FP32 = 0 };
//! device type
enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
template <DeviceTypeEnum T>
struct DeviceType {};
template <DeviceTypeEnum T> struct DeviceType {};
typedef DeviceType<kCPU> CPU;
typedef DeviceType<kFPGA> FPGA;
@@ -21,13 +21,9 @@ SOFTWARE.
#pragma once
namespace paddle_mobile {
template <int ID, typename Type>
struct IDToType {
typedef Type type_t;
};
template <int ID, typename Type> struct IDToType { typedef Type type_t; };
template <typename F, typename... Ts>
struct VariantHelper {
template <typename F, typename... Ts> struct VariantHelper {
static const size_t size = sizeof(F) > VariantHelper<Ts...>::size
? sizeof(F)
: VariantHelper<Ts...>::size;
@@ -41,8 +37,7 @@ struct VariantHelper {
}
};
template <typename F>
struct VariantHelper<F> {
template <typename F> struct VariantHelper<F> {
static const size_t size = sizeof(F);
inline static void Destroy(size_t id, void *data) {
if (id == typeid(F).hash_code()) {
@@ -53,9 +48,8 @@ struct VariantHelper<F> {
}
};
template <size_t size>
class RawData {
public:
template <size_t size> class RawData {
public:
char data[size];
RawData() {}
RawData(const RawData &raw_data) { strcpy(data, raw_data.data); }
@@ -64,8 +58,7 @@ class RawData {
// }
};
template <typename... Ts>
struct Variant {
template <typename... Ts> struct Variant {
Variant(const Variant &variant) {
// std::cout << " copy constructor " << std::endl;
type_id = variant.type_id;
@@ -77,15 +70,13 @@ struct Variant {
// helper::Destroy(type_id, &data);
}
template <typename T, typename... Args>
void Set(Args &&... args) {
template <typename T, typename... Args> void Set(Args &&... args) {
helper::Destroy(type_id, &data);
new (&data) T(std::forward<Args>(args)...);
type_id = typeid(T).hash_code();
}
template <typename T>
T &Get() const {
template <typename T> T &Get() const {
if (type_id == typeid(T).hash_code()) {
return *const_cast<T *>(reinterpret_cast<const T *>(&data));
} else {
@@ -96,16 +87,13 @@ struct Variant {
size_t TypeId() const { return type_id; }
private:
private:
static inline size_t invalid_type() { return typeid(void).hash_code(); }
typedef VariantHelper<Ts...> helper;
size_t type_id;
RawData<helper::size> data;
};
template <typename T>
struct Vistor {
typedef T type_t;
};
template <typename T> struct Vistor { typedef T type_t; };
} // namespace paddle_mobile
} // namespace paddle_mobile
@@ -20,4 +20,4 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {}
} // namespace paddle_mobile
} // namespace paddle_mobile
@@ -27,86 +27,82 @@ namespace framework {
class BlockDesc;
class Attribute {
public:
static Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc) {
public:
static Attribute GetAttrValue(const proto::OpDesc::Attr &attr_desc) {
// std::cout << "begin get attr value" << std::endl;
Attribute attr;
switch (attr_desc.type()) {
case proto::AttrType::BOOLEAN: {
attr.Set<bool>(attr_desc.b());
break;
}
case proto::AttrType::INT: {
attr.Set<int>(attr_desc.i());
break;
}
case proto::AttrType::FLOAT: {
attr.Set<float>(attr_desc.f());
break;
}
case proto::AttrType::STRING: {
attr.Set<std::string>(attr_desc.s());
break;
}
case proto::AttrType::BOOLEANS: {
std::vector<bool> val(attr_desc.bools_size());
for (int i = 0; i < attr_desc.bools_size(); ++i) {
val[i] = attr_desc.bools(i);
}
attr.Set<std::vector<bool>>(val);
break;
}
case proto::AttrType::INTS: {
std::vector<int> val(attr_desc.ints_size());
for (int i = 0; i < attr_desc.ints_size(); ++i) {
val[i] = attr_desc.ints(i);
}
attr.Set<std::vector<int>>(val);
break;
case proto::AttrType::BOOLEAN: {
attr.Set<bool>(attr_desc.b());
break;
}
case proto::AttrType::INT: {
attr.Set<int>(attr_desc.i());
break;
}
case proto::AttrType::FLOAT: {
attr.Set<float>(attr_desc.f());
break;
}
case proto::AttrType::STRING: {
attr.Set<std::string>(attr_desc.s());
break;
}
case proto::AttrType::BOOLEANS: {
std::vector<bool> val(attr_desc.bools_size());
for (int i = 0; i < attr_desc.bools_size(); ++i) {
val[i] = attr_desc.bools(i);
}
case proto::AttrType::FLOATS: {
std::vector<float> val(attr_desc.floats_size());
for (int i = 0; i < attr_desc.floats_size(); ++i) {
val[i] = attr_desc.floats(i);
}
attr.Set<std::vector<float>>(val);
break;
attr.Set<std::vector<bool>>(val);
break;
}
case proto::AttrType::INTS: {
std::vector<int> val(attr_desc.ints_size());
for (int i = 0; i < attr_desc.ints_size(); ++i) {
val[i] = attr_desc.ints(i);
}
case proto::AttrType::STRINGS: {
std::vector<std::string> val(attr_desc.strings_size());
for (int i = 0; i < attr_desc.strings_size(); ++i) {
val[i] = attr_desc.strings(i);
}
attr.Set<std::vector<std::string>>(val);
break;
attr.Set<std::vector<int>>(val);
break;
}
case proto::AttrType::FLOATS: {
std::vector<float> val(attr_desc.floats_size());
for (int i = 0; i < attr_desc.floats_size(); ++i) {
val[i] = attr_desc.floats(i);
}
case proto::AttrType::LONG: {
attr.Set<int64_t>(attr_desc.l());
break;
attr.Set<std::vector<float>>(val);
break;
}
case proto::AttrType::STRINGS: {
std::vector<std::string> val(attr_desc.strings_size());
for (int i = 0; i < attr_desc.strings_size(); ++i) {
val[i] = attr_desc.strings(i);
}
default:
// std::cout << " not support " << std::endl;
break;
attr.Set<std::vector<std::string>>(val);
break;
}
case proto::AttrType::LONG: {
attr.Set<int64_t>(attr_desc.l());
break;
}
default:
// std::cout << " not support " << std::endl;
break;
}
// std::cout << "end get attr value" << std::endl;
return attr;
}
Attribute() {}
template <typename T, typename... Args>
Attribute& Set(Args&&... args) {
template <typename T, typename... Args> Attribute &Set(Args &&... args) {
variant_.Set<T>(args...);
return *this;
}
template <typename T>
T& Get() const {
return variant_.Get<T>();
}
template <typename T> T &Get() const { return variant_.Get<T>(); }
private:
private:
Variant<int, float, std::string, std::vector<int>, std::vector<float>,
std::vector<std::string>, bool, std::vector<bool>, BlockDesc*,
std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
int64_t>
variant_;
};
@@ -114,20 +110,19 @@ class Attribute {
using AttributeMap = std::unordered_map<std::string, Attribute>;
class AttrReader {
public:
explicit AttrReader(const AttributeMap& attrs) : attrs_(attrs) {}
public:
explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}
template <typename T>
inline T Get(const std::string& name) const {
template <typename T> inline T Get(const std::string &name) const {
// PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in
// AttributeMap",
// name);
return ((Attribute)attrs_.at(name)).Get<T>();
}
private:
const AttributeMap& attrs_;
private:
const AttributeMap &attrs_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -46,5 +46,5 @@ BlockDesc::BlockDesc(const proto::BlockDesc &desc) : desc_(desc) {
}
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -27,7 +27,7 @@ namespace paddle_mobile {
namespace framework {
class BlockDesc : PaddleMobileObject {
public:
public:
BlockDesc(const proto::BlockDesc &desc);
const int &ID() const { return desc_.idx(); }
@@ -45,19 +45,18 @@ class BlockDesc : PaddleMobileObject {
std::vector<std::shared_ptr<VarDesc>> Vars() const;
std::vector<std::shared_ptr<OpDesc>> Ops() const;
private:
private:
proto::BlockDesc desc_;
std::vector<std::shared_ptr<OpDesc>> ops_;
std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
namespace std {
template <>
struct hash<paddle_mobile::framework::BlockDesc> {
template <> struct hash<paddle_mobile::framework::BlockDesc> {
typedef paddle_mobile::framework::BlockDesc argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const &s) const noexcept {
@@ -67,4 +66,4 @@ struct hash<paddle_mobile::framework::BlockDesc> {
}
};
} // namespace std
} // namespace std
@@ -27,7 +27,7 @@ enum class DataLayout {
kAnyLayout = 2,
};
inline DataLayout StringToDataLayout(const std::string& str) {
inline DataLayout StringToDataLayout(const std::string &str) {
std::string s(str);
for (size_t i = 0; i < s.size(); ++i) {
s[i] = toupper(s[i]);
@@ -44,24 +44,24 @@ inline DataLayout StringToDataLayout(const std::string& str) {
}
}
inline std::string DataLayoutToString(const DataLayout& data_layout) {
inline std::string DataLayoutToString(const DataLayout &data_layout) {
switch (data_layout) {
case DataLayout::kNHWC:
return "NHWC";
case DataLayout::kNCHW:
return "NCHW";
case DataLayout::kAnyLayout:
return "ANY_LAYOUT";
default:
break;
// std::cout << "unknown DataLayout %d", data_layout;
case DataLayout::kNHWC:
return "NHWC";
case DataLayout::kNCHW:
return "NCHW";
case DataLayout::kAnyLayout:
return "ANY_LAYOUT";
default:
break;
// std::cout << "unknown DataLayout %d", data_layout;
}
}
inline std::ostream& operator<<(std::ostream& out, const DataLayout& l) {
inline std::ostream &operator<<(std::ostream &out, const DataLayout &l) {
out << DataLayoutToString(l);
return out;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -23,14 +23,14 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
static void PassTensorData(Tensor* from, Tensor* to) {
static void PassTensorData(Tensor *from, Tensor *to) {
to->ShareDataWith(*from);
*from = Tensor();
}
void DataTransform(const OpKernelType& expected_kernel_type,
const OpKernelType& kernel_type_for_var,
const Tensor& input_tensor, Tensor* output_tensor) {
void DataTransform(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor, Tensor *output_tensor) {
bool transformed = false;
Tensor in;
in.ShareDataWith(input_tensor);
@@ -64,8 +64,8 @@ void DataTransform(const OpKernelType& expected_kernel_type,
output_tensor->ShareDataWith(in);
}
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var) {
void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor,
Variable &out_var) {
// if (in_var.IsType<LoDTensor>()) {
// auto& in_lod_tensor = in_var.Get<LoDTensor>();
// auto* tran_lod_tensor = out_var.GetMutable<LoDTensor>();
@@ -83,5 +83,5 @@ void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
// }
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -30,12 +30,12 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
void DataTransform(const OpKernelType& expected_kernel_type,
const OpKernelType& kernel_type_for_var,
const Tensor& input_tensor, Tensor* out);
void DataTransform(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor, Tensor *out);
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var);
void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor,
Variable &out_var);
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -40,4 +40,4 @@ namespace framework {
// }
// }
}
} // namespace paddle_mobile
} // namespace paddle_mobile
@@ -19,52 +19,48 @@ namespace framework {
/// @cond HIDDEN
template <int i>
Dim<i> make_dim(const int64_t* d) {
template <int i> Dim<i> make_dim(const int64_t *d) {
return Dim<i>(*d, make_dim<i - 1>(d + 1));
}
template <>
Dim<0> make_dim<0>(const int64_t* d) {
return Dim<0>(*d);
}
template <> Dim<0> make_dim<0>(const int64_t *d) { return Dim<0>(*d); }
void make_ddim(DDim& ddim, const int64_t* dims, int n) {
void make_ddim(DDim &ddim, const int64_t *dims, int n) {
switch (n) {
case 0:
ddim = make_dim<0>(dims);
break;
case 1:
ddim = make_dim<1>(dims);
break;
case 2:
ddim = make_dim<2>(dims);
break;
case 3:
ddim = make_dim<3>(dims);
break;
case 4:
ddim = make_dim<4>(dims);
break;
case 5:
ddim = make_dim<5>(dims);
break;
case 6:
ddim = make_dim<6>(dims);
break;
case 7:
ddim = make_dim<7>(dims);
break;
case 8:
ddim = make_dim<8>(dims);
break;
case 9:
ddim = make_dim<9>(dims);
break;
default:
// std::cout << "Dynamic dimensions must have between [1, 9]
// dimensions.";
break;
case 0:
ddim = make_dim<0>(dims);
break;
case 1:
ddim = make_dim<1>(dims);
break;
case 2:
ddim = make_dim<2>(dims);
break;
case 3:
ddim = make_dim<3>(dims);
break;
case 4:
ddim = make_dim<4>(dims);
break;
case 5:
ddim = make_dim<5>(dims);
break;
case 6:
ddim = make_dim<6>(dims);
break;
case 7:
ddim = make_dim<7>(dims);
break;
case 8:
ddim = make_dim<8>(dims);
break;
case 9:
ddim = make_dim<9>(dims);
break;
default:
// std::cout << "Dynamic dimensions must have between [1, 9]
// dimensions.";
break;
}
}
@@ -76,13 +72,13 @@ DDim make_ddim(std::initializer_list<int64_t> dims) {
return result;
}
DDim make_ddim(const std::vector<int64_t>& dims) {
DDim make_ddim(const std::vector<int64_t> &dims) {
DDim result(make_dim(0));
make_ddim(result, &dims[0], dims.size());
return result;
}
DDim make_ddim(const std::vector<int>& dims) {
DDim make_ddim(const std::vector<int> &dims) {
std::vector<int64_t> res(dims.size());
std::transform(dims.begin(), dims.end(), res.begin(),
[](int d) { return static_cast<int64_t>(d); });
@@ -91,35 +87,31 @@ DDim make_ddim(const std::vector<int>& dims) {
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
struct DynamicMutableIndexer : Vistor<int64_t&> {
public:
struct DynamicMutableIndexer : Vistor<int64_t &> {
public:
explicit DynamicMutableIndexer(int idx) : idx_(idx) {}
template <int D>
int64_t& operator()(Dim<D>& dim) const {
return dim[idx_];
}
template <int D> int64_t &operator()(Dim<D> &dim) const { return dim[idx_]; }
private:
private:
int idx_;
};
struct DynamicConstIndexer : public Vistor<int64_t> {
public:
public:
explicit DynamicConstIndexer(int idx) : idx_(idx) {}
template <int D>
int64_t operator()(const Dim<D>& dim) const {
template <int D> int64_t operator()(const Dim<D> &dim) const {
return dim[idx_];
}
private:
private:
int idx_;
};
/// @endcond
int64_t& DDim::operator[](int idx) {
int64_t &DDim::operator[](int idx) {
return DDim::ApplyVistor(DynamicMutableIndexer(idx), *this);
}
@@ -178,27 +170,26 @@ DDim DDim::operator*(DDim d) const {
return make_ddim(v3);
}
int64_t get(const DDim& ddim, int idx) { return ddim[idx]; }
int64_t get(const DDim &ddim, int idx) { return ddim[idx]; }
void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }
void set(DDim &ddim, int idx, int value) { ddim[idx] = value; }
/// @cond HIDDEN
struct VectorizeVisitor : Vistor<void> {
std::vector<int64_t>& vector;
std::vector<int64_t> &vector;
explicit VectorizeVisitor(std::vector<int64_t>& v) : vector(v) {}
explicit VectorizeVisitor(std::vector<int64_t> &v) : vector(v) {}
template <typename T>
void operator()(const T& t) {
template <typename T> void operator()(const T &t) {
vector.push_back(t.head);
this->operator()(t.tail);
}
void operator()(const Dim<0>& t) {}
void operator()(const Dim<0> &t) {}
};
/// @endcond
std::vector<int64_t> vectorize(const DDim& ddim) {
std::vector<int64_t> vectorize(const DDim &ddim) {
std::vector<int64_t> result;
VectorizeVisitor visitor(result);
DDim::ApplyVistor(visitor, ddim);
@@ -207,30 +198,29 @@ std::vector<int64_t> vectorize(const DDim& ddim) {
// NOTE: framework::vectorize converts to type int64_t
// which does not fit cudnn inputs.
std::vector<int> vectorize2int(const DDim& ddim) {
std::vector<int> vectorize2int(const DDim &ddim) {
std::vector<int64_t> temp = vectorize(ddim);
std::vector<int> result(temp.begin(), temp.end());
return result;
}
struct ProductVisitor : Vistor<int64_t> {
template <int D>
int64_t operator()(const Dim<D>& dim) {
template <int D> int64_t operator()(const Dim<D> &dim) {
return product(dim);
}
};
int64_t product(const DDim& ddim) {
int64_t product(const DDim &ddim) {
ProductVisitor visitor;
return DDim::ApplyVistor(visitor, ddim);
}
struct SliceVectorizeVisitor : Vistor<void> {
std::vector<int64_t>& vector;
std::vector<int64_t> &vector;
int begin;
int end;
SliceVectorizeVisitor(std::vector<int64_t>& v, int b, int e)
SliceVectorizeVisitor(std::vector<int64_t> &v, int b, int e)
: vector(v), begin(b), end(e) {
// PADDLE_ENFORCE(begin < end,
// "Begin index must be less than end index in ddim
@@ -239,8 +229,7 @@ struct SliceVectorizeVisitor : Vistor<void> {
// "Begin index can't be less than zero in ddim slice.");
}
template <int S>
void operator()(const Dim<S>& dim) {
template <int S> void operator()(const Dim<S> &dim) {
if (begin == 0) {
vector.push_back(dim.head);
} else {
@@ -252,12 +241,12 @@ struct SliceVectorizeVisitor : Vistor<void> {
}
}
void operator()(const Dim<0>& dim) {
void operator()(const Dim<0> &dim) {
// PADDLE_ENFORCE(end == 0, "End index in ddim slice is out of bound.");
}
};
DDim slice_ddim(const DDim& ddim, int begin, int end) {
DDim slice_ddim(const DDim &ddim, int begin, int end) {
std::vector<int64_t> vec;
vec.reserve(end - begin);
SliceVectorizeVisitor visitor(vec, begin, end);
@@ -270,15 +259,12 @@ DDim slice_ddim(const DDim& ddim, int begin, int end) {
/// \cond HIDDEN
struct ArityVisitor : Vistor<int> {
template <int D>
int operator()(Dim<D>) const {
return D;
}
template <int D> int operator()(Dim<D>) const { return D; }
};
/// \endcond
int arity(const DDim& d) {
int arity(const DDim &d) {
ArityVisitor arityVisitor = ArityVisitor();
return DDim::ApplyVistor(arityVisitor, d);
// return arityVisitor(d.var.Get<Dim<4>>());
@@ -288,19 +274,18 @@ int arity(const DDim& d) {
/// \endcond
struct OSVistor : Vistor<std::ostream&> {
OSVistor(std::ostream& os) : os_(os) {}
struct OSVistor : Vistor<std::ostream &> {
OSVistor(std::ostream &os) : os_(os) {}
template <int D>
std::ostream& operator()(Dim<D> dim) const {
template <int D> std::ostream &operator()(Dim<D> dim) const {
return os_ << dim;
}
private:
std::ostream& os_;
private:
std::ostream &os_;
};
std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
std::ostream &operator<<(std::ostream &os, const DDim &ddim) {
auto vistor = OSVistor(os);
DDim::ApplyVistor(vistor, ddim);
return os;
@@ -310,15 +295,15 @@ DDim::DDim(std::initializer_list<int64_t> init_list) {
*this = make_ddim(init_list);
}
DDim flatten_to_2d(const DDim& src, int num_col_dims) {
DDim flatten_to_2d(const DDim &src, int num_col_dims) {
int rank = src.size();
return make_ddim({product(slice_ddim(src, 0, num_col_dims)),
product(slice_ddim(src, num_col_dims, rank))});
}
DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
DDim flatten_to_1d(const DDim &src) { return make_ddim({product(src)}); }
DDim stride(const DDim& ddim) {
DDim stride(const DDim &ddim) {
std::vector<int64_t> strides(ddim.size());
strides[ddim.size() - 1] = 1;
for (int i = ddim.size() - 2; i >= 0; --i) {
@@ -327,7 +312,7 @@ DDim stride(const DDim& ddim) {
return framework::make_ddim(strides);
}
DDim stride_numel(const framework::DDim& ddim) {
DDim stride_numel(const framework::DDim &ddim) {
std::vector<int64_t> strides(ddim.size());
strides[ddim.size() - 1] = ddim[ddim.size() - 1];
for (int i = ddim.size() - 2; i >= 0; --i) {
@@ -336,5 +321,5 @@ DDim stride_numel(const framework::DDim& ddim) {
return framework::make_ddim(strides);
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -14,12 +14,12 @@ limitations under the License. */
#pragma once
#include "common/variant.h"
#include "dim.h"
#include <assert.h>
#include <initializer_list>
#include <stdexcept>
#include <vector>
#include "common/variant.h"
#include "dim.h"
namespace paddle_mobile {
namespace framework {
@@ -66,15 +66,11 @@ struct DDim {
DDim() { var.Set<Dim<1>>(Dim<1>()); }
template <int D>
explicit DDim(const Dim<D> &in) {
var.Set<Dim<D>>(in);
}
template <int D> explicit DDim(const Dim<D> &in) { var.Set<Dim<D>>(in); }
/*implicit*/ DDim(std::initializer_list<int64_t> init_list);
template <int D>
DDim &operator=(const Dim<D> &in) {
template <int D> DDim &operator=(const Dim<D> &in) {
var.Set<Dim<D>>(in);
return *this;
}
@@ -161,5 +157,5 @@ DDim flatten_to_1d(const DDim &src);
DDim stride(const DDim &ddim);
DDim stride_numel(const DDim &ddim);
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -24,8 +24,7 @@ namespace paddle_mobile {
namespace framework {
// Statically sized, statically indexed dimension
template <int i>
struct Dim {
template <int i> struct Dim {
static constexpr int dimensions = i;
template <typename... Args>
@@ -35,7 +34,7 @@ struct Dim {
}
HOSTDEVICE
Dim(int64_t _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}
Dim(int64_t _head, const Dim<i - 1> &_tail) : head(_head), tail(_tail) {}
HOSTDEVICE
Dim() : head(0), tail() {}
@@ -43,7 +42,7 @@ struct Dim {
/** Construct a Dim from a linear index and size. Uses Fortran order
* indexing. */
HOSTDEVICE
Dim(int64_t idx, const Dim<i>& size)
Dim(int64_t idx, const Dim<i> &size)
: head(idx % size.head), tail(idx / size.head, size.tail) {}
/** Construct a Dim with each dimension set to the given index */
@@ -51,15 +50,15 @@ struct Dim {
Dim(int64_t idx) : head(idx), tail(idx) {}
HOSTDEVICE
bool operator==(const Dim<i>& o) const {
bool operator==(const Dim<i> &o) const {
return (head == o.head) && (tail == o.tail);
}
HOSTDEVICE
bool operator!=(const Dim<i>& o) const { return !(*this == o); }
bool operator!=(const Dim<i> &o) const { return !(*this == o); }
HOSTDEVICE
int64_t& operator[](int idx);
int64_t &operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
@@ -70,8 +69,7 @@ struct Dim {
};
// Base case specialization
template <>
struct Dim<0> {
template <> struct Dim<0> {
static constexpr int dimensions = 0;
HOSTDEVICE
@@ -81,7 +79,7 @@ struct Dim<0> {
Dim() {}
HOSTDEVICE
Dim(int idx, const Dim<0>& size) {
Dim(int idx, const Dim<0> &size) {
#ifndef __CUDA_ARCH__
if (idx > 0) {
throw std::invalid_argument("Index out of range.");
@@ -92,13 +90,13 @@ struct Dim<0> {
}
HOSTDEVICE
bool operator==(const Dim<0>& o) const { return true; }
bool operator==(const Dim<0> &o) const { return true; }
HOSTDEVICE
bool operator!=(const Dim<0>& o) const { return false; }
bool operator!=(const Dim<0> &o) const { return false; }
HOSTDEVICE
int64_t& operator[](int idx);
int64_t &operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
};
@@ -106,37 +104,28 @@ struct Dim<0> {
namespace {
// Helper for accessing Dim classes
template <int i>
struct DimGetter {
template <int i> struct DimGetter {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D& d) {
template <typename D> HOSTDEVICE static int64_t impl(const D &d) {
return DimGetter<i - 1>::impl(d.tail);
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t& impl(D& d) {
template <typename D> HOSTDEVICE static int64_t &impl(D &d) {
return DimGetter<i - 1>::impl(d.tail);
}
};
// Eureka! We found the element!
template <>
struct DimGetter<0> {
template <> struct DimGetter<0> {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D& d) {
template <typename D> HOSTDEVICE static int64_t impl(const D &d) {
return d.head;
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t& impl(D& d) {
return d.head;
}
template <typename D> HOSTDEVICE static int64_t &impl(D &d) { return d.head; }
};
template <int D>
HOSTDEVICE int64_t& indexer(Dim<D>& dim, int idx) {
template <int D> HOSTDEVICE int64_t &indexer(Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
@@ -150,8 +139,7 @@ HOSTDEVICE int64_t& indexer(Dim<D>& dim, int idx) {
return indexer(dim.tail, idx - 1);
}
template <>
HOSTDEVICE int64_t& indexer<0>(Dim<0>& dim, int idx) {
template <> HOSTDEVICE int64_t &indexer<0>(Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
@@ -167,8 +155,7 @@ HOSTDEVICE int64_t& indexer<0>(Dim<0>& dim, int idx) {
#endif
}
template <int D>
HOSTDEVICE int64_t indexer(const Dim<D>& dim, int idx) {
template <int D> HOSTDEVICE int64_t indexer(const Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
@@ -182,8 +169,7 @@ HOSTDEVICE int64_t indexer(const Dim<D>& dim, int idx) {
return indexer(dim.tail, idx - 1);
}
template <>
HOSTDEVICE int64_t indexer<0>(const Dim<0>& dim, int idx) {
template <> HOSTDEVICE int64_t indexer<0>(const Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
@@ -199,29 +185,25 @@ HOSTDEVICE int64_t indexer<0>(const Dim<0>& dim, int idx) {
#endif
}
} // namespace
} // namespace
// Static access to constant Dim
template <int i, int l>
HOSTDEVICE int64_t get(const Dim<l>& d) {
template <int i, int l> HOSTDEVICE int64_t get(const Dim<l> &d) {
return DimGetter<i>::impl(d);
}
// Static access to mutable Dim
template <int i, int l>
HOSTDEVICE int64_t& get(Dim<l>& d) {
template <int i, int l> HOSTDEVICE int64_t &get(Dim<l> &d) {
return DimGetter<i>::impl(d);
}
// Dynamic access to constant Dim
template <int l>
HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
template <int l> HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
// std::cout << "l: " << l << std::endl;
return indexer(*this, i);
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE int64_t& Dim<l>::operator[](int i) {
template <int l> HOSTDEVICE int64_t &Dim<l>::operator[](int i) {
return indexer(*this, i);
}
@@ -231,54 +213,52 @@ inline HOSTDEVICE int64_t Dim<0>::operator[](int i) const {
}
// Dynamic access to mutable Dim
inline HOSTDEVICE int64_t& Dim<0>::operator[](int i) {
inline HOSTDEVICE int64_t &Dim<0>::operator[](int i) {
return indexer(*this, i);
}
// Dynamic access to constant Dim
// without std::enable_if will try to instantiate this on get<0>(d)
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l>& d,
HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l> &d,
int i) {
return d[i];
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t&>::type get(Dim<l>& d,
int i) {
HOSTDEVICE typename std::enable_if<(l > 0), int64_t &>::type get(Dim<l> &d,
int i) {
return d[i];
}
// Dot product of two dims
template <int i>
HOSTDEVICE int64_t linearize(const Dim<i>& a, const Dim<i>& b) {
HOSTDEVICE int64_t linearize(const Dim<i> &a, const Dim<i> &b) {
return a.head * b.head + linearize(a.tail, b.tail);
}
// Base case dot product of two Dims
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t linearize(const Dim<0>& a, const Dim<0>& b) {
HOSTDEVICE inline int64_t linearize(const Dim<0> &a, const Dim<0> &b) {
return 0;
}
// Product of a Dim
template <int i>
HOSTDEVICE int64_t product(const Dim<i>& a, int prod = 1) {
template <int i> HOSTDEVICE int64_t product(const Dim<i> &a, int prod = 1) {
return prod * a.head * product(a.tail);
}
// Base case product of a Dim
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t product(const Dim<0>& a, int prod) {
template <> HOSTDEVICE inline int64_t product(const Dim<0> &a, int prod) {
return prod;
}
// Is 0 <= idx_i < size_i for all i?
template <int i>
HOSTDEVICE bool contained(const Dim<i>& idx, const Dim<i>& size) {
HOSTDEVICE bool contained(const Dim<i> &idx, const Dim<i> &size) {
return ((0 <= idx.head) && (idx.head < size.head) &&
contained(idx.tail, size.tail));
}
@@ -286,7 +266,7 @@ HOSTDEVICE bool contained(const Dim<i>& idx, const Dim<i>& size) {
// Base case of is 0 <= idx_i < size_i ?
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline bool contained(const Dim<0>& idx, const Dim<0>& size) {
HOSTDEVICE inline bool contained(const Dim<0> &idx, const Dim<0> &size) {
return true;
}
@@ -294,15 +274,14 @@ HOSTDEVICE inline bool contained(const Dim<0>& idx, const Dim<0>& size) {
* \brief Compute exclusive prefix-multiply of a Dim.
*/
template <int i>
HOSTDEVICE Dim<i> ex_prefix_mul(const Dim<i>& src, int mul = 1) {
HOSTDEVICE Dim<i> ex_prefix_mul(const Dim<i> &src, int mul = 1) {
return Dim<i>(mul, ex_prefix_mul(src.tail, mul * src.head));
}
///\cond HIDDEN
// Base case of ex_prefix_mul
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0>& src, int mul) {
template <> HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0> &src, int mul) {
return Dim<0>();
}
///\endcond
@@ -310,38 +289,36 @@ HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0>& src, int mul) {
/**
* Add two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_plus(const Dim<i>& a, const Dim<i>& b) {
template <int i> HOSTDEVICE Dim<i> dim_plus(const Dim<i> &a, const Dim<i> &b) {
return Dim<i>(a.head + b.head, dim_plus(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_plus(const Dim<0>& a, const Dim<0>& b) {
HOSTDEVICE inline Dim<0> dim_plus(const Dim<0> &a, const Dim<0> &b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator+(const Dim<i>& lhs, const Dim<i>& rhs) {
HOSTDEVICE Dim<i> operator+(const Dim<i> &lhs, const Dim<i> &rhs) {
return dim_plus(lhs, rhs);
}
/**
* Multiply two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_mult(const Dim<i>& a, const Dim<i>& b) {
template <int i> HOSTDEVICE Dim<i> dim_mult(const Dim<i> &a, const Dim<i> &b) {
return Dim<i>(a.head * b.head, dim_mult(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_mult(const Dim<0>& a, const Dim<0>& b) {
HOSTDEVICE inline Dim<0> dim_mult(const Dim<0> &a, const Dim<0> &b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator*(const Dim<i>& lhs, const Dim<i>& rhs) {
HOSTDEVICE Dim<i> operator*(const Dim<i> &lhs, const Dim<i> &rhs) {
return dim_mult(lhs, rhs);
}
@@ -356,7 +333,7 @@ HOSTDEVICE Dim<i> operator*(const Dim<i>& lhs, const Dim<i>& rhs) {
*/
template <int i>
HOSTDEVICE Dim<i> normalize_strides(const Dim<i>& size, const Dim<i>& stride) {
HOSTDEVICE Dim<i> normalize_strides(const Dim<i> &size, const Dim<i> &stride) {
int norm_stride = size.head == 1 ? 0 : stride.head;
return Dim<i>(norm_stride, normalize_strides(size.tail, stride.tail));
}
@@ -364,8 +341,8 @@ HOSTDEVICE Dim<i> normalize_strides(const Dim<i>& size, const Dim<i>& stride) {
///\cond HIDDEN
template <>
HOSTDEVICE inline Dim<0> normalize_strides(const Dim<0>& size,
const Dim<0>& stride) {
HOSTDEVICE inline Dim<0> normalize_strides(const Dim<0> &size,
const Dim<0> &stride) {
return Dim<0>();
}
@@ -386,8 +363,8 @@ HOSTDEVICE Dim<sizeof...(Args)> make_dim(Args... idxes) {
// Allows us to output a Dim
// XXX For some reason, overloading fails to resolve this correctly
template <int i>
typename std::enable_if<(i > 1), std::ostream&>::type operator<<(
std::ostream& os, const Dim<i>& d) {
typename std::enable_if<(i > 1), std::ostream &>::type
operator<<(std::ostream &os, const Dim<i> &d) {
os << d.head << ", " << d.tail;
return os;
}
@@ -395,18 +372,17 @@ typename std::enable_if<(i > 1), std::ostream&>::type operator<<(
// Base case that allows us to output a Dim
// XXX I wish this could be an overload instead of a template
template <int i>
typename std::enable_if<(i == 1), std::ostream&>::type operator<<(
std::ostream& os, const Dim<i>& d) {
typename std::enable_if<(i == 1), std::ostream &>::type
operator<<(std::ostream &os, const Dim<i> &d) {
os << d.head;
return os;
}
inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) {
inline std::ostream &operator<<(std::ostream &os, const Dim<0> &d) {
return os;
}
template <int i>
HOST std::string Dim<i>::to_string() const {
template <int i> HOST std::string Dim<i>::to_string() const {
std::stringstream stream;
stream << *this;
@@ -428,5 +404,5 @@ HOSTDEVICE Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
return result;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -102,5 +102,5 @@ void Executor<Dtype>::predict(const Tensor &t, int block_id) {
template class Executor<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -34,13 +34,12 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class Executor {
public:
template <typename Dtype> class Executor {
public:
Executor(const Program<Dtype> p);
std::shared_ptr<Tensor> predict(Tensor &t);
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
void predict(const Tensor &t, int block_id);
@@ -50,5 +49,5 @@ class Executor {
bool use_optimize_ = false;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
This diff is collapsed.
This diff is collapsed.
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "lod_tensor.h"
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>
#include <stdint.h>
#include <string.h>
namespace paddle_mobile {
namespace framework {
@@ -103,7 +103,8 @@ LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
LoD ToAbsOffset(const LoD &in) {
// the lowest level stores relative offsets
if (in.empty() || in.size() == 1) return in;
if (in.empty() || in.size() == 1)
return in;
LoD result = in;
for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
for (size_t i = 0; i < in[level].size(); ++i) {
@@ -135,16 +136,20 @@ bool operator==(const LoD &a, const LoD &b) {
}
bool CheckLoD(const LoD &in, int tensor_height) {
if (in.empty()) return true;
if (in.empty())
return true;
for (const auto &level : in) {
// check: there should be more than 2 offsets existing in each level.
if (level.size() < 2) return false;
if (level.size() < 2)
return false;
// check: the first offset (the begin offset) of each level should be 0.
if (level.front() != 0) return false;
if (level.front() != 0)
return false;
// check: all the offsets in a level should be ascending (no duplicate
// items allowed).
if (!std::is_sorted(level.begin(), level.end(), [](size_t a, size_t b) {
if (a < b) return true;
if (a < b)
return true;
return false;
})) {
std::cout << "ascending error";
@@ -161,29 +166,34 @@ bool CheckLoD(const LoD &in, int tensor_height) {
// NOTE LoD store the levels from top to bottom, so the higher level goes
// first.
for (size_t level = 0; level < in.size() - 1; level++) {
if (in[level].back() != in[level + 1].size() - 1) return false;
if (in[level].back() != in[level + 1].size() - 1)
return false;
}
return true;
}
bool CheckAbsLoD(const LoD &in, int tensor_height) {
if (in.empty()) return true;
if (in.empty())
return true;
for (const auto &level : in) {
// check: all the offsets in a level should be ascending(no same items
// allows).
if (!std::is_sorted(level.begin(), level.begin(), [](size_t a, size_t b) {
if (a < b) return true;
if (a < b)
return true;
return false;
})) {
return false;
}
// check: there should be more than 2 offsets existing in each level.
if (level.size() < 2) return false;
if (level.size() < 2)
return false;
// check: the first offset of each level should be 0, and the last should be
// the same(the height of underlying tensor).
if (level.front() != 0) return false;
if (level.front() != 0)
return false;
if (tensor_height < 0) {
tensor_height = level.back();
} else if ((size_t)tensor_height != level.back()) {
@@ -220,7 +230,7 @@ void AppendLoD(LoD *lod, const LoD &lod_length) {
// "The lod_length should has the same size with the appended lod.");
if (lod->empty()) {
for (size_t i = 0; i < lod_length.size(); ++i) {
lod->emplace_back(1, 0); // size = 1, value = 0;
lod->emplace_back(1, 0); // size = 1, value = 0;
}
*lod = LoD(lod_length.size(), std::vector<size_t>({0}));
}
@@ -233,7 +243,7 @@ void AppendLoD(LoD *lod, const LoD &lod_length) {
}
void SerializeToStream(std::ostream &os, const LoDTensor &tensor) {
{ // the 1st field, uint32_t version for LoDTensor
{ // the 1st field, uint32_t version for LoDTensor
constexpr uint32_t version = 0;
os.write(reinterpret_cast<const char *>(&version), sizeof(version));
}
@@ -284,5 +294,5 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor) {
TensorFromStream(is, static_cast<Tensor *>(tensor));
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -14,12 +14,12 @@ limitations under the License. */
#pragma once
#include "tensor.h"
#include "tensor_util.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensor.h"
#include "tensor_util.h"
namespace paddle_mobile {
@@ -96,7 +96,7 @@ bool CheckAbsLoD(const LoD &in, int tensor_height = -1);
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
class LoDTensor : public Tensor {
public:
public:
LoDTensor() : Tensor() {}
explicit LoDTensor(const LoD &lod) : lod_(lod) {}
@@ -131,7 +131,7 @@ class LoDTensor : public Tensor {
return (lod_)[level].size() - 1;
}
private:
private:
LoD lod_;
};
@@ -181,8 +181,9 @@ LoDTensor LodExpand(const LoDTensor &source, const LoD &lod, size_t level) {
// Returns:
// LoD = [[1, 4], [2, 4, 2, 3, 2]]
// pair<size_t, size_t> = {11, 24}
std::pair<LoD, std::pair<size_t, size_t>> GetSubLoDAndAbsoluteOffset(
const LoD &lod, size_t start_idx, size_t end_idx, size_t start_level);
std::pair<LoD, std::pair<size_t, size_t>>
GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx, size_t end_idx,
size_t start_level);
void AppendLoD(LoD *lod, const LoD &lod_length);
......@@ -195,5 +196,5 @@ void SerializeToStream(std::ostream &os, const LoDTensor &tensor);
void DeserializeFromStream(std::istream &is, LoDTensor *tensor);
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -55,5 +55,5 @@ const std::unordered_map<std::string, Attribute> &OpDesc::GetAttrMap() const {
return attrs_;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -26,7 +26,7 @@ namespace paddle_mobile {
namespace framework {
class OpDesc : PaddleMobileObject {
public:
public:
OpDesc(const proto::OpDesc &desc);
const std::vector<std::string> &Input(const std::string &name) const;
const std::vector<std::string> &Output(const std::string &name) const;
@@ -40,12 +40,12 @@ class OpDesc : PaddleMobileObject {
const std::string &Type() { return desc_.type(); };
private:
private:
proto::OpDesc desc_;
VariableNameMap inputs_;
VariableNameMap outputs_;
AttributeMap attrs_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -24,42 +24,38 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
struct OpInfo {
template <typename Dtype> struct OpInfo {
OpCreator<Dtype> creator_;
const OpCreator<Dtype>& Creator() const {
const OpCreator<Dtype> &Creator() const {
// PADDLE_ENFORCE_NOT_NULL(creator_,
// "Operator Creator has not been registered");
return creator_;
}
};
template <typename Dtype>
class OpInfoMap;
template <typename Dtype> class OpInfoMap;
template <typename Dtype>
static OpInfoMap<Dtype>* g_op_info_map = nullptr;
template <typename Dtype> static OpInfoMap<Dtype> *g_op_info_map = nullptr;
template <typename Dtype>
class OpInfoMap {
public:
static OpInfoMap& Instance() {
template <typename Dtype> class OpInfoMap {
public:
static OpInfoMap &Instance() {
if (g_op_info_map<Dtype> == nullptr) {
g_op_info_map<Dtype> = new OpInfoMap();
}
return *g_op_info_map<Dtype>;
};
bool Has(const std::string& op_type) const {
bool Has(const std::string &op_type) const {
return map_.find(op_type) != map_.end();
}
void Insert(const std::string& type, const OpInfo<Dtype>& info) {
void Insert(const std::string &type, const OpInfo<Dtype> &info) {
// PADDLE_ENFORCE(!Has(type), "Operator %s has been registered", type);
map_.insert({type, info});
}
const OpInfo<Dtype>& Get(const std::string& type) const {
const OpInfo<Dtype> &Get(const std::string &type) const {
auto op_info_ptr = GetNullable(type);
// PADDLE_ENFORCE_NOT_NULL(op_info_ptr, "Operator %s has not been
// registered",
@@ -67,7 +63,7 @@ class OpInfoMap {
return *op_info_ptr;
}
const OpInfo<Dtype>* GetNullable(const std::string& type) const {
const OpInfo<Dtype> *GetNullable(const std::string &type) const {
auto it = map_.find(type);
if (it == map_.end()) {
return nullptr;
@@ -76,20 +72,20 @@ }
}
}
const std::unordered_map<std::string, OpInfo<Dtype>>& map() const {
const std::unordered_map<std::string, OpInfo<Dtype>> &map() const {
return map_;
}
std::unordered_map<std::string, OpInfo<Dtype>>* mutable_map() {
std::unordered_map<std::string, OpInfo<Dtype>> *mutable_map() {
return &map_;
}
private:
private:
OpInfoMap() = default;
std::unordered_map<std::string, OpInfo<Dtype>> map_;
// DISABLE_COPY_AND_ASSIGN(OpInfoMap);
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -25,7 +25,7 @@ namespace paddle_mobile {
namespace framework {
struct OpKernelType {
struct Hash {
size_t operator()(const OpKernelType& key) const {
size_t operator()(const OpKernelType &key) const {
int data_type = static_cast<int>(key.data_type_) << LEFT_SHIFT;
int data_layout = static_cast<int>(key.data_layout_) << (LEFT_SHIFT * 2);
@@ -44,21 +44,21 @@ struct OpKernelType {
DataLayout data_layout = DataLayout::kAnyLayout)
: data_type_(data_type), data_layout_(data_layout) {}
bool operator==(const OpKernelType& o) const {
bool operator==(const OpKernelType &o) const {
return data_type_ == o.data_type_ && data_layout_ == o.data_layout_;
}
bool operator!=(const OpKernelType& o) const { return !(*this == o); }
bool operator!=(const OpKernelType &o) const { return !(*this == o); }
};
inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
inline bool NeedTransformLayout(const DataLayout &l, const DataLayout &r) {
return l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r;
}
inline bool TransFromNeeded(const OpKernelType& l, const OpKernelType& r) {
inline bool TransFromNeeded(const OpKernelType &l, const OpKernelType &r) {
return (l.data_type_ != r.data_type_) ||
NeedTransformLayout(l.data_layout_, r.data_layout_);
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -22,5 +22,5 @@ namespace paddle_mobile {
namespace framework {
// this class not only make proto but also init attribute checkers.
class OpProtoAndCheckerMaker {};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -23,23 +23,17 @@ namespace paddle_mobile {
namespace framework {
template <typename Dtype>
OperatorBase<Dtype>::OperatorBase(const std::string& type,
const VariableNameMap& inputs,
const VariableNameMap& outputs,
const AttributeMap& attrs,
OperatorBase<Dtype>::OperatorBase(const std::string &type,
const VariableNameMap &inputs,
const VariableNameMap &outputs,
const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: type_(type),
inputs_(inputs),
outputs_(outputs),
attrs_(attrs),
: type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs),
scope_(scope) {
CheckAllInputOutputSet();
}
template <typename Dtype>
void OperatorBase<Dtype>::Run() {
RunImpl();
}
template <typename Dtype> void OperatorBase<Dtype>::Run() { RunImpl(); }
template <typename Dtype>
void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
@@ -47,5 +41,5 @@ void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
template class OperatorBase<CPU>;
template class OperatorWithKernel<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -35,53 +35,51 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class OperatorBase : PaddleMobileObject {
public:
OperatorBase(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap& attrs,
template <typename Dtype> class OperatorBase : PaddleMobileObject {
public:
OperatorBase(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope);
virtual ~OperatorBase() {}
virtual void Run();
const VariableNameMap& Inputs() const { return inputs_; }
const VariableNameMap& Outputs() const { return outputs_; }
const std::string& Type() const { return type_; }
const AttributeMap& Attrs() const { return attrs_; }
const VariableNameMap &Inputs() const { return inputs_; }
const VariableNameMap &Outputs() const { return outputs_; }
const std::string &Type() const { return type_; }
const AttributeMap &Attrs() const { return attrs_; }
protected:
protected:
std::shared_ptr<Scope> scope_;
std::string type_;
VariableNameMap inputs_;
VariableNameMap outputs_;
AttributeMap attrs_;
private:
private:
void CheckAllInputOutputSet() const;
virtual void RunImpl() const = 0;
};
template <typename Dtype>
class OperatorWithKernel : public OperatorBase<Dtype> {
public:
OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap& attrs,
public:
OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
virtual void InferShape() const = 0;
protected:
protected:
virtual void RunImpl() const = 0;
private:
private:
};
template <typename Dtype, typename P>
class OpKernelBase : PaddleMobileObject {
public:
virtual void Compute(const P& para) const = 0;
template <typename Dtype, typename P> class OpKernelBase : PaddleMobileObject {
public:
virtual void Compute(const P &para) const = 0;
virtual ~OpKernelBase() = default;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -18,19 +18,19 @@ SOFTWARE.
#pragma once
#include <string>
#include "stdio.h"
#include <string>
namespace paddle_mobile {
class PaddleMobileObject {
public:
virtual inline const std::string& ToString() {
public:
virtual inline const std::string &ToString() {
char address[128] = {0};
sprintf(address, "%p", this);
return std::string(address);
}
private:
private:
};
} // namespace paddle_mobile
} // namespace paddle_mobile
@@ -18,4 +18,4 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {}
} // namespace paddle_mobile
} // namespace paddle_mobile
@@ -28,13 +28,13 @@ namespace framework {
template <typename Dtype, Precision P = Precision::FP32>
class Program : PaddleMobileObject {
public:
public:
std::shared_ptr<ProgramDesc> originProgram;
std::shared_ptr<ProgramDesc> optimizeProgram;
std::shared_ptr<Scope> scope;
private:
private:
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -18,5 +18,5 @@ std::shared_ptr<BlockDesc> ProgramDesc::Block(size_t idx) {
return blocks_[idx];
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -28,15 +28,15 @@ namespace paddle_mobile {
namespace framework {
class ProgramDesc : PaddleMobileObject {
public:
public:
ProgramDesc(const proto::ProgramDesc &desc);
std::shared_ptr<BlockDesc> Block(size_t idx);
const std::vector<std::shared_ptr<BlockDesc>> &Blocks() { return blocks_; };
private:
private:
std::vector<std::shared_ptr<BlockDesc>> blocks_;
proto::ProgramDesc desc_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -112,5 +112,5 @@ Variable *Scope::FindVarLocally(const std::string &name) const {
return nullptr;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -18,38 +18,38 @@ SOFTWARE.
==============================================================================*/
#pragma once
#include <list> //std::list
#include <mutex> //std::mutex
#include <unordered_map> //std::unordered_map
#include "variable.h"
#include <list> //std::list
#include <mutex> //std::mutex
#include <unordered_map> //std::unordered_map
namespace paddle_mobile {
namespace framework {
class Scope {
public:
public:
Scope() {}
~Scope() {}
Scope& NewScope() const;
Scope &NewScope() const;
/// Create a variable with given name if it doesn't exist.
Variable* Var(const std::string& name);
Variable *Var(const std::string &name);
/// Create a variable with a scope-unique name.
Variable* Var(std::string* name = nullptr);
Variable *Var(std::string *name = nullptr);
void EraseVars(const std::vector<std::string>& var_names);
void EraseVars(const std::vector<std::string> &var_names);
/// Find a variable in the scope or any of its ancestors. Returns
/// nullptr if cannot find.
Variable* FindVar(const std::string& name) const;
Variable *FindVar(const std::string &name) const;
const Scope* parent() const { return parent_; }
const Scope *parent() const { return parent_; }
/// Find the scope or an ancestor scope that contains the given variable.
const Scope* FindScope(const Variable* var) const;
const Scope *FindScope(const Variable *var) const;
void DeleteScope(Scope* scope) const;
void DeleteScope(Scope *scope) const;
/// Drop all kids scopes belonged to this scope.
void DropKids();
@@ -58,23 +58,23 @@ class Scope {
std::vector<std::string> LocalVarNames() const;
// Rename variable to a new name
void Rename(const std::string& origin_name,
const std::string& new_name) const;
void Rename(const std::string &origin_name,
const std::string &new_name) const;
// Rename variable to a new name and return the new name
std::string Rename(const std::string& origin_name) const;
std::string Rename(const std::string &origin_name) const;
Variable* FindVarLocally(const std::string& name) const;
Variable *FindVarLocally(const std::string &name) const;
private:
private:
// Call Scope::NewScope for a sub-scope.
explicit Scope(Scope const* parent) : parent_(parent) {}
explicit Scope(Scope const *parent) : parent_(parent) {}
mutable std::unordered_map<std::string, Variable*> vars_;
mutable std::list<Scope*> kids_;
Scope const* parent_{nullptr};
mutable std::unordered_map<std::string, Variable *> vars_;
mutable std::list<Scope *> kids_;
Scope const *parent_{nullptr};
mutable std::mutex mutex_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -27,8 +27,8 @@ namespace paddle_mobile {
namespace framework {
class SelectedRows {
public:
SelectedRows(const std::vector<int64_t>& rows, const int64_t& height)
public:
SelectedRows(const std::vector<int64_t> &rows, const int64_t &height)
: rows_(rows), height_(height) {
value_.reset(new Tensor());
}
@@ -38,19 +38,19 @@ class SelectedRows {
value_.reset(new Tensor());
}
const Tensor& value() const { return *value_; }
const Tensor &value() const { return *value_; }
Tensor* mutable_value() { return value_.get(); }
Tensor *mutable_value() { return value_.get(); }
int64_t height() const { return height_; }
void set_height(int64_t height) { height_ = height; }
const std::vector<int64_t>& rows() const { return rows_; }
const std::vector<int64_t> &rows() const { return rows_; }
std::vector<int64_t>* mutable_rows() { return &rows_; }
std::vector<int64_t> *mutable_rows() { return &rows_; }
void set_rows(const std::vector<int64_t>& rows) { rows_ = rows; }
void set_rows(const std::vector<int64_t> &rows) { rows_ = rows; }
/**
* get the index of id in rows
@@ -67,7 +67,7 @@ class SelectedRows {
return make_ddim(dims);
}
private:
private:
// Notice: rows can be duplicate. We can have {0, 4, 7, 0, 5, 7, 9} here.
// SelectedRows are simply concated when adding together. Until a
// SelectedRows add a Tensor, will the duplicate rows be handled.
......@@ -76,5 +76,5 @@ class SelectedRows {
int64_t height_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
@@ -26,11 +26,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename... T>
struct SizeOfTypeFunctor;
template <typename... T> struct SizeOfTypeFunctor;
template <typename T>
struct SizeOfTypeFunctor<T> {
template <typename T> struct SizeOfTypeFunctor<T> {
size_t operator()(std::type_index type) const {
if (typeid(T).hash_code() == type.hash_code()) {
return sizeof(T);
@@ -40,8 +38,7 @@ struct SizeOfTypeFunctor<T> {
}
};
template <>
struct SizeOfTypeFunctor<> {
template <> struct SizeOfTypeFunctor<> {
size_t operator()(std::type_index type) const { return 0UL; }
};
@@ -68,12 +65,11 @@ static inline size_t SizeOfType(std::type_index type) {
class LoDTensor;
class Tensor {
public:
public:
Tensor() : offset_(0) {}
/*! Return a pointer to mutable memory block. */
template <typename T>
inline T *data() {
template <typename T> inline T *data() {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() == typeid(T).hash_code(),
@@ -84,8 +80,7 @@ class Tensor {
}
/*! Return a pointer to constant memory block. */
template <typename T>
inline const T *data() const {
template <typename T> inline const T *data() const {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() == typeid(T).hash_code(),
@@ -102,8 +97,7 @@ class Tensor {
* @brief Return a pointer to mutable memory block.
* @note If not exist, then allocation.
*/
template <typename T>
inline T *mutable_data() {
template <typename T> inline T *mutable_data() {
static_assert(std::is_pod<T>::value, "T must be POD");
return reinterpret_cast<T *>(mutable_data(typeid(T)));
}
@@ -141,8 +135,7 @@ class Tensor {
*
* @note If not exist, then allocation.
*/
template <typename T>
inline T *mutable_data(DDim dims) {
template <typename T> inline T *mutable_data(DDim dims) {
static_assert(std::is_pod<T>::value, "T must be POD");
Resize(dims);
return mutable_data<T>();
@@ -227,7 +220,7 @@ class Tensor {
inline void set_layout(const DataLayout layout) { layout_ = layout; }
private:
private:
/**
* @note Placeholder hides type T, so it doesn't appear as a template
* parameter of Variable.
......@@ -248,8 +241,7 @@ class Tensor {
PlaceholderImpl(size_t size, std::type_index type)
: ptr_(static_cast<uint8_t *>(memory::Alloc(size)),
memory::PODDeleter<uint8_t>()),
size_(size),
type_(type) {
size_(size), type_(type) {
// PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s
// memory to allocation.",
// (is_cpu_place(place_) ?
......@@ -315,5 +307,5 @@ inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
return res;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
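The Tensor interface above separates shape from storage: Resize only records dims, and mutable_data<T>() performs the (re)allocation. A short sketch of the intended call order, assuming the declarations in this hunk; the ReshapeToMatrix call assumes the usual flatten-to-2D semantics, since its body is only partially visible here:

paddle_mobile::framework::Tensor t;
t.Resize(paddle_mobile::framework::make_ddim({2, 3, 4})); // shape only
float *p = t.mutable_data<float>(); // allocates; T must be POD (static_assert)
p[0] = 1.0f;
const float *cp = t.data<float>();  // const access runs check_memory_size()
// Flatten into a 2 x 12 matrix view sharing the same storage:
paddle_mobile::framework::Tensor m =
    paddle_mobile::framework::ReshapeToMatrix(t, /*num_col_dims=*/1);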
......@@ -20,7 +20,7 @@
namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor& src, Tensor* dst) {
void TensorCopy(const Tensor &src, Tensor *dst) {
// VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to
// "
// << dst_place;
......@@ -37,7 +37,7 @@ void TensorCopy(const Tensor& src, Tensor* dst) {
memory::Copy(dst_ptr, src_ptr, size);
}
void TensorCopySync(const Tensor& src, Tensor* dst) {
void TensorCopySync(const Tensor &src, Tensor *dst) {
// VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
// << " to " << dst_place;
src.check_memory_size();
......@@ -49,17 +49,15 @@ void TensorCopySync(const Tensor& src, Tensor* dst) {
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename Predicate>
struct AnyDTypeVisitor {
template <typename Predicate> struct AnyDTypeVisitor {
Predicate predicate_;
const Tensor& tensor_;
Tensor* out_;
const Tensor &tensor_;
Tensor *out_;
AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, Tensor* out)
AnyDTypeVisitor(Predicate predicate, const Tensor &tensor, Tensor *out)
: predicate_(predicate), tensor_(tensor), out_(out) {}
template <typename T>
void operator()() const {
template <typename T> void operator()() const {
// auto t = EigenVector<T>::Flatten(tensor_);
// auto o = EigenScalar<bool>::From(*out_);
// return any of predicate_(t) is true.
......@@ -68,18 +66,17 @@ struct AnyDTypeVisitor {
};
template <typename Predicate>
inline void AnyImpl(Predicate predicate, const Tensor& tensor,
framework::Tensor* out) {
inline void AnyImpl(Predicate predicate, const Tensor &tensor,
framework::Tensor *out) {
VisitDataType(ToDataType(tensor.type()),
AnyDTypeVisitor<Predicate>(predicate, tensor, out));
}
template <typename Predicate>
struct AnyVisitor {
const framework::Tensor& tensor_;
template <typename Predicate> struct AnyVisitor {
const framework::Tensor &tensor_;
Predicate predicate_;
AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
AnyVisitor(const framework::Tensor &tensor, Predicate predicate)
: tensor_(tensor), predicate_(std::move(predicate)) {}
bool operator()(void) const {
......@@ -90,13 +87,13 @@ struct AnyVisitor {
return this->GetResult(out);
}
bool GetResult(const framework::Tensor& out) const {
bool GetResult(const framework::Tensor &out) const {
return *out.data<bool>();
}
};
template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
inline bool Any(const framework::Tensor &tensor, Predicate predicate) {
AnyVisitor<Predicate> visitor(tensor, predicate);
// return platform::VisitPlace(visitor);
return visitor();
......@@ -104,101 +101,100 @@ inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
struct ContainsNANPredicate {
template <typename T>
auto operator()(const T& eigen_vec) const
auto operator()(const T &eigen_vec) const
-> decltype(std::declval<T>().isnan()) {
// Cast eigen_vector to vector of bool. true if is inf.
return eigen_vec.isnan();
}
};
bool TensorContainsNAN(const framework::Tensor& tensor) {
bool TensorContainsNAN(const framework::Tensor &tensor) {
ContainsNANPredicate predicate;
return Any(tensor, predicate);
}
struct ContainsInfPredicate {
template <typename T>
auto operator()(const T& eigen_vec) const
auto operator()(const T &eigen_vec) const
-> decltype(std::declval<T>().isinf()) {
// Cast eigen_vector to vector of bool. true if is inf.
return eigen_vec.isinf();
}
};
bool TensorContainsInf(const framework::Tensor& tensor) {
bool TensorContainsInf(const framework::Tensor &tensor) {
ContainsInfPredicate predicate;
return Any(tensor, predicate);
}
void TensorToStream(std::ostream& os, const Tensor& tensor) {
{ // the 1st field, uint32_t version
void TensorToStream(std::ostream &os, const Tensor &tensor) {
{ // the 1st field, uint32_t version
constexpr uint32_t version = 0;
os.write(reinterpret_cast<const char*>(&version), sizeof(version));
os.write(reinterpret_cast<const char *>(&version), sizeof(version));
}
{ // the 2nd field, tensor description
// int32_t size
// void* protobuf message
{ // the 2nd field, tensor description
// int32_t size
// void* protobuf message
proto::VarType::TensorDesc desc;
desc.set_data_type(framework::ToDataType(tensor.type()));
auto dims = framework::vectorize(tensor.dims());
auto* pb_dims = desc.mutable_dims();
auto *pb_dims = desc.mutable_dims();
pb_dims->Resize(static_cast<int>(dims.size()), 0);
std::copy(dims.begin(), dims.end(), pb_dims->begin());
int32_t size = desc.ByteSize();
os.write(reinterpret_cast<const char*>(&size), sizeof(size));
os.write(reinterpret_cast<const char *>(&size), sizeof(size));
auto out = desc.SerializeAsString();
os.write(out.data(), size);
}
{ // the 3rd field, tensor data
{ // the 3rd field, tensor data
uint64_t size = tensor.memory_size();
auto* data_ptr = tensor.data<void>();
auto *data_ptr = tensor.data<void>();
// PADDLE_ENFORCE(size < std::numeric_limits<std::streamsize>::max(),
// "Index overflow when writing tensor");
os.write(static_cast<const char*>(data_ptr),
os.write(static_cast<const char *>(data_ptr),
static_cast<std::streamsize>(size));
}
}
struct DeserializedDataFunctor {
DeserializedDataFunctor(void** buf, Tensor* tensor)
DeserializedDataFunctor(void **buf, Tensor *tensor)
: buf_(buf), tensor_(tensor) {}
template <typename T>
void operator()() {
template <typename T> void operator()() {
*buf_ = tensor_->mutable_data<T>();
}
void** buf_;
Tensor* tensor_;
void **buf_;
Tensor *tensor_;
};
void TensorFromStream(std::istream& is, framework::Tensor* tensor) {
void TensorFromStream(std::istream &is, framework::Tensor *tensor) {
uint32_t version;
is.read(reinterpret_cast<char*>(&version), sizeof(version));
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
proto::VarType::TensorDesc desc;
{ // int32_t size
// proto buffer
{ // int32_t size
// proto buffer
int32_t size;
is.read(reinterpret_cast<char*>(&size), sizeof(size));
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char*>(buf.get()), size);
is.read(reinterpret_cast<char *>(buf.get()), size);
// PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size),
// "Cannot parse tensor desc");
}
{ // read tensor
{ // read tensor
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
tensor->Resize(framework::make_ddim(dims));
void* buf;
void *buf;
framework::VisitDataType(desc.data_type(),
DeserializedDataFunctor(&buf, tensor));
is.read(static_cast<char*>(buf), tensor->memory_size());
is.read(static_cast<char *>(buf), tensor->memory_size());
}
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
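TensorToStream writes three fields in order: a uint32_t version, an int32_t-size-prefixed protobuf TensorDesc, then the raw buffer. TensorFromStream reads them back, allocating through DeserializedDataFunctor. A hedged round-trip sketch (the file name is hypothetical):

#include <fstream>

paddle_mobile::framework::Tensor src, dst;
src.Resize(paddle_mobile::framework::make_ddim({4}));
src.mutable_data<float>(); // data must exist before serialization

std::ofstream os("tensor.bin", std::ios::binary);
paddle_mobile::framework::TensorToStream(os, src);    // version + desc + data
os.close();

std::ifstream is("tensor.bin", std::ios::binary);
paddle_mobile::framework::TensorFromStream(is, &dst); // restores dims and data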
......@@ -13,54 +13,54 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "framework.pb.h"
#include "memory/t_malloc.h"
#include "platform/data_type.h"
#include "tensor.h"
#include <vector>
namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor& src, Tensor* dst);
void TensorCopySync(const Tensor& src, Tensor* dst);
void TensorCopy(const Tensor &src, Tensor *dst);
void TensorCopySync(const Tensor &src, Tensor *dst);
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst);
void TensorFromVector(const std::vector<T> &src, Tensor *dst);
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst);
void TensorToVector(const Tensor &src, std::vector<T> *dst);
bool TensorContainsNAN(const framework::Tensor& tensor);
bool TensorContainsInf(const framework::Tensor& tensor);
bool TensorContainsNAN(const framework::Tensor &tensor);
bool TensorContainsInf(const framework::Tensor &tensor);
void TensorToStream(std::ostream& os, const Tensor& tensor);
void TensorFromStream(std::istream& is, Tensor* tensor);
void TensorToStream(std::ostream &os, const Tensor &tensor);
void TensorFromStream(std::istream &is, Tensor *tensor);
//
// The implementation of template functions.
//
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst) {
auto src_ptr = static_cast<const void*>(src.data());
void TensorFromVector(const std::vector<T> &src, Tensor *dst) {
auto src_ptr = static_cast<const void *>(src.data());
dst->Resize({static_cast<int64_t>(src.size())});
auto dst_ptr = static_cast<void*>(dst->mutable_data<T>());
auto dst_ptr = static_cast<void *>(dst->mutable_data<T>());
auto size = src.size() * sizeof(T);
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst) {
auto src_ptr = static_cast<const void*>(src.data<T>());
void TensorToVector(const Tensor &src, std::vector<T> *dst) {
auto src_ptr = static_cast<const void *>(src.data<T>());
auto size = src.numel() * sizeof(T);
dst->resize(src.numel());
auto dst_ptr = static_cast<void*>(dst->data());
auto dst_ptr = static_cast<void *>(dst->data());
memory::Copy(dst_ptr, src_ptr, size);
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
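The vector helpers copy through memory::Copy in both directions (note the declaration above must read TensorToVector to match the definition). A round-trip sketch using the templates defined in this hunk:

std::vector<float> in = {1.0f, 2.0f, 3.0f}, out;
paddle_mobile::framework::Tensor t;
paddle_mobile::framework::TensorFromVector(in, &t); // Resize({3}) + copy in
paddle_mobile::framework::TensorToVector(t, &out);  // resize(dst) + copy back
// out now equals in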
......@@ -24,5 +24,5 @@ namespace framework {
VarDesc::VarDesc(const proto::VarDesc &desc) : desc_(desc) {}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
......@@ -25,7 +25,7 @@ namespace paddle_mobile {
namespace framework {
class VarDesc {
public:
public:
VarDesc(const proto::VarDesc &desc);
std::string Name() const { return desc_.name(); }
......@@ -36,33 +36,33 @@ class VarDesc {
const proto::VarType::ChannelDesc &channel_desc() const {
switch (desc_.type().type()) {
case proto::VarType::CHANNEL:
return desc_.type().channel();
default:
break;
case proto::VarType::CHANNEL:
return desc_.type().channel();
default:
break;
}
}
const proto::VarType::TensorDesc &tensor_desc() const {
switch (desc_.type().type()) {
case proto::VarType::SELECTED_ROWS:
return desc_.type().selected_rows();
case proto::VarType::LOD_TENSOR:
return desc_.type().lod_tensor().tensor();
case proto::VarType::LOD_TENSOR_ARRAY:
return desc_.type().tensor_array().tensor();
default:
break;
case proto::VarType::SELECTED_ROWS:
return desc_.type().selected_rows();
case proto::VarType::LOD_TENSOR:
return desc_.type().lod_tensor().tensor();
case proto::VarType::LOD_TENSOR_ARRAY:
return desc_.type().tensor_array().tensor();
default:
break;
}
}
proto::VarType::Type GetDataType() const {
switch (desc_.type().type()) {
case proto::VarType::CHANNEL:
return channel_desc().data_type();
break;
default:
return tensor_desc().data_type();
case proto::VarType::CHANNEL:
return channel_desc().data_type();
break;
default:
return tensor_desc().data_type();
}
}
......@@ -80,9 +80,9 @@ class VarDesc {
return this->RepeatedToVector(tensor_desc().dims());
}
private:
private:
proto::VarDesc desc_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
......@@ -34,5 +34,5 @@ inline proto::VarType::Type ToVarType(std::type_index type) {
}
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
......@@ -18,42 +18,39 @@ SOFTWARE.
==============================================================================*/
#pragma once
#include "paddle_mobile_object.h"
#include <iostream>
#include <memory>
#include <string>
#include <typeindex>
#include <typeinfo>
#include "paddle_mobile_object.h"
namespace paddle_mobile {
namespace framework {
class Variable : public PaddleMobileObject {
public:
public:
Variable() {}
~Variable() {}
template <typename T>
const T* Get() const {
return static_cast<const T*>(holder_->Ptr());
template <typename T> const T *Get() const {
return static_cast<const T *>(holder_->Ptr());
}
bool IsInitialized() const { return holder_ != nullptr; }
const std::string* Name() { return name_; }
const std::string *Name() { return name_; }
template <typename T>
T* GetMutable() {
template <typename T> T *GetMutable() {
if (!IsType<T>()) {
if (*Name() == "pixel") {
// std::cout << " reset " << *Name() << std::endl;
}
holder_.reset(new PlaceholderImp<T>(new T()));
}
return static_cast<T*>(holder_->Ptr());
return static_cast<T *>(holder_->Ptr());
}
template <typename T>
bool IsType() const {
template <typename T> bool IsType() const {
if (holder_) {
// printf("not null \n");
printf(" holder type : %s, this type %s \n", holder_->Type().name(),
......@@ -69,33 +66,32 @@ class Variable : public PaddleMobileObject {
std::type_index Type() const { return holder_->Type(); }
void SetName(const std::string* name) { name_ = name; }
void SetName(const std::string *name) { name_ = name; }
private:
private:
struct Placeholder {
Placeholder() = default;
virtual ~Placeholder() = default;
virtual const std::type_info& Type() const = 0;
virtual void* Ptr() const = 0;
virtual const std::type_info &Type() const = 0;
virtual void *Ptr() const = 0;
};
template <typename T>
struct PlaceholderImp : public Placeholder {
explicit PlaceholderImp(T* ptr) : ptr_(ptr), type_(typeid(T)) {}
template <typename T> struct PlaceholderImp : public Placeholder {
explicit PlaceholderImp(T *ptr) : ptr_(ptr), type_(typeid(T)) {}
virtual const std::type_info& Type() const { return type_; }
virtual void* Ptr() const override {
return static_cast<void*>(ptr_.get());
virtual const std::type_info &Type() const { return type_; }
virtual void *Ptr() const override {
return static_cast<void *>(ptr_.get());
}
std::unique_ptr<T> ptr_;
const std::type_info& type_;
const std::type_info &type_;
};
std::unique_ptr<Placeholder> holder_;
friend class Scope;
const std::string* name_;
const std::string *name_;
};
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
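Variable erases the concrete type behind the Placeholder interface: GetMutable<T>() default-constructs a T on first use, and IsType<T>() compares type_info. A usage sketch, noting that SetName stores the pointer, so the string must outlive the Variable:

paddle_mobile::framework::Variable var;
const std::string name = "pixel"; // must outlive var: only the pointer is kept
var.SetName(&name);

auto *t = var.GetMutable<paddle_mobile::framework::Tensor>(); // constructs a Tensor
bool is_tensor = var.IsType<paddle_mobile::framework::Tensor>(); // true now
const auto *ct = var.Get<paddle_mobile::framework::Tensor>();    // const access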
......@@ -45,10 +45,10 @@ void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
std::ifstream is(file_path);
std::streampos pos = is.tellg(); // save current position
std::streampos pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
// std::cout << " file length = " << is.tellg() << std::endl;
is.seekg(pos); // restore saved position
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
......@@ -106,34 +106,34 @@ void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
int type_size = 0;
// std::cout << " desc pre type: ";
switch (desc.data_type()) {
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
memory = tensor->mutable_data<float>();
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
memory = tensor->mutable_data<float>();
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
}
// std::cout << " malloc size: " << memory_size * type_size << std::endl;
......@@ -143,8 +143,8 @@ void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
};
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname) {
const framework::Program<Dtype, P>
Loader<Dtype, P>::Load(const std::string &dirname) {
std::string model_filename = dirname + "/__model__";
std::string program_desc_str;
ReadBinaryFile(model_filename, &program_desc_str);
......@@ -217,43 +217,43 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
// std::cout << " attr type: " << attr.type() << std::endl;
switch (attr.type()) {
case framework::proto::AttrType::BOOLEAN:
// std::cout << " boolen: " << attr.b() << std::endl;
break;
case framework::proto::AttrType::INT:
// std::cout << " int: " << attr.i() << std::endl;
break;
case framework::proto::AttrType::FLOAT:
// std::cout << " float: " << attr.f() << std::endl;
case framework::proto::AttrType::STRING:
// std::cout << " string: " << attr.s() << std::endl;
case framework::proto::AttrType::BOOLEANS:
// std::vector<bool>
// bools(attr.bools_size());
for (int y = 0; y < attr.bools_size(); ++y) {
// std::cout << " bool - " << attr.bools(y) <<
// std::endl;
}
case framework::proto::AttrType::LONG:
// std::cout << " long: " << attr.l() << std::endl;
case framework::proto::AttrType::FLOATS:
for (int y = 0; y < attr.floats_size(); ++y) {
// std::cout << " float - " << y << ": " <<
// attr.floats(y)
// << std::endl;
}
case framework::proto::AttrType::INTS:
for (int y = 0; y < attr.ints_size(); ++y) {
// std::cout << " int - " << y << ": " <<
// attr.ints(y)
// << std::endl;
}
case framework::proto::AttrType::STRINGS:
for (int y = 0; y < attr.strings_size(); ++y) {
// std::cout << " string - " << y << ": " <<
// attr.strings(y)
// << std::endl;
}
case framework::proto::AttrType::BOOLEAN:
// std::cout << " boolen: " << attr.b() << std::endl;
break;
case framework::proto::AttrType::INT:
// std::cout << " int: " << attr.i() << std::endl;
break;
case framework::proto::AttrType::FLOAT:
// std::cout << " float: " << attr.f() << std::endl;
case framework::proto::AttrType::STRING:
// std::cout << " string: " << attr.s() << std::endl;
case framework::proto::AttrType::BOOLEANS:
// std::vector<bool>
// bools(attr.bools_size());
for (int y = 0; y < attr.bools_size(); ++y) {
// std::cout << " bool - " << attr.bools(y) <<
// std::endl;
}
case framework::proto::AttrType::LONG:
// std::cout << " long: " << attr.l() << std::endl;
case framework::proto::AttrType::FLOATS:
for (int y = 0; y < attr.floats_size(); ++y) {
// std::cout << " float - " << y << ": " <<
// attr.floats(y)
// << std::endl;
}
case framework::proto::AttrType::INTS:
for (int y = 0; y < attr.ints_size(); ++y) {
// std::cout << " int - " << y << ": " <<
// attr.ints(y)
// << std::endl;
}
case framework::proto::AttrType::STRINGS:
for (int y = 0; y < attr.strings_size(); ++y) {
// std::cout << " string - " << y << ": " <<
// attr.strings(y)
// << std::endl;
}
}
}
}
......@@ -280,10 +280,10 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
// std::cout << " to load " << var.name() << std::endl;
std::string file_path = dirname + "/" + var.name();
std::ifstream is(file_path);
std::streampos pos = is.tellg(); // save current position
std::streampos pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
// std::cout << " file length = " << is.tellg() << std::endl;
is.seekg(pos); // restore saved position
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
......@@ -333,33 +333,33 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
int type_size = 0;
// std::cout << " desc pre type: ";
switch (desc.data_type()) {
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
}
// std::cout << " malloc size: " << memory_size * type_size
......@@ -381,4 +381,4 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
template class Loader<CPU, Precision::FP32>;
} // namespace paddle_mobile
} // namespace paddle_mobile
......@@ -29,11 +29,11 @@ namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class Loader : PaddleMobileObject {
public:
public:
const framework::Program<Dtype, P> Load(const std::string &dirname);
private:
private:
void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
};
} // namespace paddle_mobile
} // namespace paddle_mobile
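Loader::Load expects dirname to contain a __model__ program description plus one binary file per persistable variable, as the .cpp hunk above shows. A minimal sketch; the path is hypothetical, and only the CPU/FP32 instantiation is compiled in this commit:

paddle_mobile::Loader<paddle_mobile::CPU, paddle_mobile::Precision::FP32> loader;
auto program = loader.Load("/path/to/mobilenet"); // reads "/path/to/mobilenet/__model__"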
......@@ -47,5 +47,5 @@ void Free(void *ptr) {
}
}
} // namespace memory
} // namespace paddle_mobile
} // namespace memory
} // namespace paddle_mobile
......@@ -37,11 +37,10 @@ void Free(void *ptr);
* std::unique_ptr<T> in tensor.h.
* static_cast
*/
template <typename T>
class PODDeleter {
template <typename T> class PODDeleter {
static_assert(std::is_pod<T>::value, "T must be POD");
public:
public:
explicit PODDeleter(){};
void operator()(T *ptr) { Free(static_cast<void *>(ptr)); }
......@@ -55,12 +54,11 @@ class PODDeleter {
* std::unique_ptr<T> in tensor.h.
* reinterpret_cast
*/
template <typename T>
class PlainDeleter {
public:
template <typename T> class PlainDeleter {
public:
explicit PlainDeleter(){};
void operator()(T *ptr) { Free(reinterpret_cast<void *>(ptr)); }
};
} // namespace memory
} // namespace paddle_mobile
} // namespace memory
} // namespace paddle_mobile
......@@ -72,5 +72,5 @@ void ConvOp<Dtype, T>::InferShape() const {
template class ConvOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
} // namespace operators
} // namespace paddle_mobile
......@@ -28,9 +28,9 @@ using namespace framework;
template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public:
ConvOp(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const framework::AttributeMap& attrs,
public:
ConvOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
......@@ -39,7 +39,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
protected:
void RunImpl() const {
operators::ConvKernel<DeviceType, T, ConvParam> kernel;
kernel.Compute(param_);
......@@ -48,5 +48,5 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
ConvParam param_;
};
} // operators
} // paddle_mobile
} // operators
} // paddle_mobile
......@@ -21,9 +21,9 @@ SOFTWARE.
namespace paddle_mobile {
namespace operators {
bool IsExpand(const std::vector<int64_t>& filter_dim,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::vector<int>& dilations) {
bool IsExpand(const std::vector<int64_t> &filter_dim,
const std::vector<int> &strides, const std::vector<int> &paddings,
const std::vector<int> &dilations) {
bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
for (size_t j = 0; j < strides.size(); ++j) {
filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
......@@ -35,8 +35,8 @@ bool IsExpand(const std::vector<int64_t>& filter_dim,
}
template <>
void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam& param) const {
const Tensor* input = param.Input();
void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam &param) const {
const Tensor *input = param.Input();
std::cout << " conv param " << param << std::endl;
......@@ -45,7 +45,7 @@ void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam& param) const {
// that avoids modifying the variable in the Scope.
Tensor filter = *param.Filter();
Tensor* output = param.Output();
Tensor *output = param.Output();
// output->mutable_data<T>(context.GetPlace());
int groups = param.Groups();
......@@ -149,5 +149,5 @@ void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam& param) const {
template class ConvKernel<CPU, float, ConvParam>;
} // namespace operators
} // namespace paddle_mobile
} // namespace operators
} // namespace paddle_mobile
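IsExpand answers whether the convolution needs im2col/vol2col at all: only a 1x1 filter with unit stride, zero padding, and unit dilation can skip the expansion. A worked check under the OIHW filter layout used here:

// 16 output channels, 3 input channels, 3x3 spatial filter (OIHW).
bool expand = paddle_mobile::operators::IsExpand(
    /*filter_dim=*/{16, 3, 3, 3},
    /*strides=*/{1, 1}, /*paddings=*/{1, 1}, /*dilations=*/{1, 1});
// expand == true: the 3x3 filter and nonzero padding force im2col.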
......@@ -31,7 +31,7 @@ using namespace framework;
template <typename DeviceType, typename T, typename P>
class ConvKernel : public framework::OpKernelBase<DeviceType, ConvParam> {
public:
public:
void Compute(const ConvParam &param) const;
};
}
......
......@@ -24,12 +24,11 @@ namespace math {
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col) {
template <class T> class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
// PADDLE_ENFORCE(col->dims().size() == 5);
......@@ -58,8 +57,8 @@ class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
int channels_col = im_channels * filter_height * filter_width;
const T* im_data = im.data<T>();
T* col_data = col->data<T>();
const T *im_data = im.data<T>();
T *col_data = col->data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
......@@ -86,13 +85,12 @@ class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im) {
template <class T> class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *im) {
// PADDLE_ENFORCE(im->dims().size() == 3);
// PADDLE_ENFORCE(col.dims().size() == 5);
int im_channels = im->dims()[0];
......@@ -120,8 +118,8 @@ class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
int channels_col = im_channels * filter_height * filter_width;
T* im_data = im->data<T>();
const T* col_data = col.data<T>();
T *im_data = im->data<T>();
const T *col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
......@@ -152,12 +150,11 @@ template class Col2ImFunctor<ColFormat::kCFO, CPU, double>;
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col) {
template <class T> class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
// PADDLE_ENFORCE(col->dims().size() == 5);
int im_channels = im.dims()[0];
......@@ -177,8 +174,8 @@ class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
// 1, col_width, "col_width and padding(padding_left, padding_right)
// are " "inconsistent.");
const T* im_data = im.data<T>();
T* col_data = col->data<T>();
const T *im_data = im.data<T>();
T *col_data = col->data<T>();
for (int col_row_idx = 0; col_row_idx < col_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < col_width; ++col_col_idx) {
......@@ -220,13 +217,12 @@ class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im) {
template <class T> class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *im) {
// PADDLE_ENFORCE(im->dims().size() == 3);
// PADDLE_ENFORCE(col.dims().size() == 5);
int im_channels = im->dims()[0];
......@@ -246,8 +242,8 @@ class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
// 1, col_width, "col_width and padding(padding_left, padding_right)
// are " "inconsistent.");
T* im_data = im->data<T>();
const T* col_data = col.data<T>();
T *im_data = im->data<T>();
const T *col_data = col.data<T>();
for (int col_row_idx = 0; col_row_idx < col_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < col_width; ++col_col_idx) {
......@@ -289,6 +285,6 @@ template class Im2ColFunctor<ColFormat::kOCF, CPU, double>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, float>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, double>;
} // namespace math
} // namespace operators
} // namespace paddle_mobile
} // namespace math
} // namespace operators
} // namespace paddle_mobile
......@@ -79,21 +79,21 @@ enum class ColFormat { kCFO = 0, kOCF = 1 };
*/
template <ColFormat Format, typename DeviceType, typename T>
class Im2ColFunctor {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col);
public:
void operator()(const framework::Tensor &im, const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *col);
};
template <ColFormat Format, typename DeviceType, typename T>
class Col2ImFunctor {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im);
public:
void operator()(const framework::Tensor &col,
const std::vector<int> &dilation,
const std::vector<int> &stride,
const std::vector<int> &padding, framework::Tensor *im);
};
} // namespace math
} // namespace operators
} // namespace paddle_mobile
} // namespace math
} // namespace operators
} // namespace paddle_mobile
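For kCFO the column tensor is laid out as [input_channels, filter_height, filter_width, output_height, output_width]. A sketch of a 3x3, stride-1, pad-1 expansion; the four-element {up, left, down, right} padding order is an assumption inferred from the commented checks, not confirmed by this hunk:

namespace fw = paddle_mobile::framework;
namespace math = paddle_mobile::operators::math;

fw::Tensor im, col;
im.Resize(fw::make_ddim({3, 32, 32}));        // CHW image
im.mutable_data<float>();
col.Resize(fw::make_ddim({3, 3, 3, 32, 32})); // kCFO: C, KH, KW, OH, OW
col.mutable_data<float>();

math::Im2ColFunctor<math::ColFormat::kCFO, paddle_mobile::CPU, float> im2col;
im2col(im, /*dilation=*/{1, 1}, /*stride=*/{1, 1},
       /*padding=*/{1, 1, 1, 1}, &col);       // assumed {up, left, down, right}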
......@@ -21,7 +21,7 @@ namespace math {
template <>
void gemm<float>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const float alpha,
const float* A, const float* B, const float beta, float* C) {
const float *A, const float *B, const float beta, float *C) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
......@@ -32,8 +32,8 @@ void gemm<float>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
template <>
void gemm<double>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const double alpha,
const double* A, const double* B, const double beta,
double* C) {
const double *A, const double *B, const double beta,
double *C) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
......@@ -43,8 +43,8 @@ void gemm<double>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
template <>
void gemm<float>(const bool transA, const bool transB, const int M, const int N,
const int K, const float alpha, const float* A, const int lda,
const float* B, const int ldb, const float beta, float* C,
const int K, const float alpha, const float *A, const int lda,
const float *B, const int ldb, const float beta, float *C,
const int ldc) {
cblas_sgemm(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans,
transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A,
......@@ -53,18 +53,18 @@ void gemm<float>(const bool transA, const bool transB, const int M, const int N,
template <>
void gemm<double>(const bool transA, const bool transB, const int M,
const int N, const int K, const double alpha, const double* A,
const int lda, const double* B, const int ldb,
const double beta, double* C, const int ldc) {
const int N, const int K, const double alpha, const double *A,
const int lda, const double *B, const int ldb,
const double beta, double *C, const int ldc) {
cblas_dgemm(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans,
transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A,
lda, B, ldb, beta, C, ldc);
}
template <>
void matmul<float>(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, float alpha,
framework::Tensor *matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -89,9 +89,9 @@ void matmul<float>(const framework::Tensor& matrix_a, bool trans_a,
}
template <>
void matmul<double>(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b,
double alpha, framework::Tensor* matrix_out, double beta) {
void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b,
double alpha, framework::Tensor *matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -115,6 +115,6 @@ void matmul<double>(const framework::Tensor& matrix_a, bool trans_a,
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
} // namespace math
} // namespace operators
} // namespace paddle_mobile
......@@ -14,9 +14,9 @@ limitations under the License. */
#pragma once
#include "framework/tensor.h"
#include <cblas.h>
#include <cmath>
#include "framework/tensor.h"
namespace paddle_mobile {
namespace operators {
......@@ -24,19 +24,19 @@ namespace math {
template <typename T>
void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const T alpha, const T* A,
const T* B, const T beta, T* C);
const int M, const int N, const int K, const T alpha, const T *A,
const T *B, const T beta, T *C);
template <typename T>
void gemm(const bool transA, const bool transB, const int M, const int N,
const int K, const T alpha, const T* A, const int lda, const T* B,
const int ldb, const T beta, T* C, const int ldc);
const int K, const T alpha, const T *A, const int lda, const T *B,
const int ldb, const T beta, T *C, const int ldc);
// matrix multiply with continuous memory
template <typename T>
void matmul(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, T alpha,
framework::Tensor* matrix_out, T beta);
} // namespace math
} // namespace operators
} // namespace paddle_mobile
void matmul(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, T alpha,
framework::Tensor *matrix_out, T beta);
} // namespace math
} // namespace operators
} // namespace paddle_mobile
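matmul assumes both tensors are rank-2 (the enforcement is commented out in the .cc hunk) and dispatches to the row-major CBLAS gemm wrappers above. A minimal sketch:

namespace fw = paddle_mobile::framework;
namespace math = paddle_mobile::operators::math;

fw::Tensor a, b, c;
a.Resize(fw::make_ddim({2, 3})); a.mutable_data<float>();
b.Resize(fw::make_ddim({3, 4})); b.mutable_data<float>();
c.Resize(fw::make_ddim({2, 4})); c.mutable_data<float>();

// C = 1.0 * A * B + 0.0 * C, via cblas_sgemm with no transposition
math::matmul<float>(a, false, b, false, 1.0f, &c, 0.0f);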
......@@ -25,12 +25,11 @@ using Tensor = paddle_mobile::framework::Tensor;
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <typename T>
class Vol2ColFunctor<CPU, T> {
public:
void operator()(const Tensor& vol, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* col) const {
template <typename T> class Vol2ColFunctor<CPU, T> {
public:
void operator()(const Tensor &vol, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *col) const {
// PADDLE_ENFORCE(vol.dims().size() == 4);
// PADDLE_ENFORCE(col->dims().size() == 7);
......@@ -69,8 +68,8 @@ class Vol2ColFunctor<CPU, T> {
// "input_width and output_width are "
// "mismatching.");
const T* vol_data = vol.data<T>();
T* col_data = col->data<T>();
const T *vol_data = vol.data<T>();
T *col_data = col->data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
......@@ -108,12 +107,11 @@ class Vol2ColFunctor<CPU, T> {
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <typename T>
class Col2VolFunctor<CPU, T> {
public:
void operator()(const Tensor& col, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* vol) const {
template <typename T> class Col2VolFunctor<CPU, T> {
public:
void operator()(const Tensor &col, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *vol) const {
// PADDLE_ENFORCE(vol->dims().size() == 4);
// PADDLE_ENFORCE(col.dims().size() == 7);
......@@ -151,8 +149,8 @@ class Col2VolFunctor<CPU, T> {
// output_width,
// "input_width and output_width are "
// "mismatching.");
T* vol_data = vol->data<T>();
const T* col_data = col.data<T>();
T *vol_data = vol->data<T>();
const T *col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
......@@ -190,6 +188,6 @@ template class Vol2ColFunctor<CPU, double>;
template class Col2VolFunctor<CPU, float>;
template class Col2VolFunctor<CPU, double>;
} // namespace math
} // namespace operators
} // namespace paddle_mobile
} // namespace math
} // namespace operators
} // namespace paddle_mobile
......@@ -64,22 +64,20 @@ namespace math {
*/
using Tensor = paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class Vol2ColFunctor {
public:
void operator()(const Tensor& vol, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* col) const;
template <typename DeviceType, typename T> class Vol2ColFunctor {
public:
void operator()(const Tensor &vol, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *col) const;
};
template <typename DeviceType, typename T>
class Col2VolFunctor {
public:
void operator()(const Tensor& col, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* vol) const;
template <typename DeviceType, typename T> class Col2VolFunctor {
public:
void operator()(const Tensor &col, const std::vector<int> &dilations,
const std::vector<int> &strides,
const std::vector<int> &paddings, Tensor *vol) const;
};
} // namespace math
} // namespace operators
} // namespace paddle_mobile
} // namespace math
} // namespace operators
} // namespace paddle_mobile
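Vol2Col generalizes im2col to volumes: the column layout is [input_channels, filter_depth, filter_height, filter_width, output_depth, output_height, output_width]. A sketch for a 2x2x2 filter with unit stride and no padding; the three-element per-axis vectors are an assumption consistent with the declarations above:

namespace fw = paddle_mobile::framework;
namespace math = paddle_mobile::operators::math;

fw::Tensor vol, col;
vol.Resize(fw::make_ddim({3, 8, 16, 16}));        // C, D, H, W
vol.mutable_data<float>();
// Output dims: (8-2+1, 16-2+1, 16-2+1) = (7, 15, 15)
col.Resize(fw::make_ddim({3, 2, 2, 2, 7, 15, 15}));
col.mutable_data<float>();

math::Vol2ColFunctor<paddle_mobile::CPU, float> vol2col;
vol2col(vol, /*dilations=*/{1, 1, 1}, /*strides=*/{1, 1, 1},
        /*paddings=*/{0, 0, 0}, &col);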
......@@ -21,7 +21,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace operators {
std::ostream& operator<<(std::ostream& os, const ConvParam& conv_param) {
std::ostream &operator<<(std::ostream &os, const ConvParam &conv_param) {
os << "parameter of conv: " << std::endl;
os << " stride: "
<< " (" << conv_param.Strides()[0] << conv_param.Strides()[1] << ") "
......@@ -39,5 +39,5 @@ std::ostream& operator<<(std::ostream& os, const ConvParam& conv_param) {
return os;
}
} // namespace operators
} // namespace paddle_mobile
} // namespace operators
} // namespace paddle_mobile
......@@ -30,8 +30,8 @@ namespace operators {
using namespace framework;
class OpParam : PaddleMobileObject {
public:
protected:
public:
protected:
template <typename T>
static T *InputFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Input", inputs, scope);
......@@ -67,7 +67,7 @@ class OpParam : PaddleMobileObject {
};
class ConvParam : OpParam {
public:
public:
ConvParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
......@@ -94,7 +94,7 @@ class ConvParam : OpParam {
const int &Groups() const { return groups; }
private:
private:
Tensor *input_;
Tensor *output_;
LoDTensor *filter_;
......@@ -106,5 +106,5 @@ class ConvParam : OpParam {
std::ostream &operator<<(std::ostream &os, const ConvParam &conv_param);
} // namespace operators
} // namespace paddle_mobile
} // namespace operators
} // namespace paddle_mobile
......@@ -14,9 +14,9 @@ limitations under the License. */
#pragma once
#include "framework/framework.pb.h"
#include <string>
#include <typeindex>
#include "framework/framework.pb.h"
namespace paddle_mobile {
namespace framework {
......@@ -47,70 +47,70 @@ inline proto::VarType::Type ToDataType(std::type_index type) {
inline std::type_index ToTypeIndex(proto::VarType::Type type) {
switch (type) {
// case proto::VarType::FP16:
// return typeid(platform::float16);
case proto::VarType::FP32:
return typeid(float);
case proto::VarType::FP64:
return typeid(double);
case proto::VarType::INT32:
return typeid(int);
case proto::VarType::INT64:
return typeid(int64_t);
case proto::VarType::BOOL:
return typeid(bool);
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
// case proto::VarType::FP16:
// return typeid(platform::float16);
case proto::VarType::FP32:
return typeid(float);
case proto::VarType::FP64:
return typeid(double);
case proto::VarType::INT32:
return typeid(int);
case proto::VarType::INT64:
return typeid(int64_t);
case proto::VarType::BOOL:
return typeid(bool);
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
}
}
template <typename Visitor>
inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
switch (type) {
// case proto::VarType::FP16:
// visitor.template operator()<platform::float16>();
// break;
case proto::VarType::FP32:
visitor.template operator()<float>();
break;
case proto::VarType::FP64:
visitor.template operator()<double>();
break;
case proto::VarType::INT32:
visitor.template operator()<int>();
break;
case proto::VarType::INT64:
visitor.template operator()<int64_t>();
break;
case proto::VarType::BOOL:
visitor.template operator()<bool>();
break;
default:
// PADDLE_THROW("Not supported");
printf("Not supported");
// case proto::VarType::FP16:
// visitor.template operator()<platform::float16>();
// break;
case proto::VarType::FP32:
visitor.template operator()<float>();
break;
case proto::VarType::FP64:
visitor.template operator()<double>();
break;
case proto::VarType::INT32:
visitor.template operator()<int>();
break;
case proto::VarType::INT64:
visitor.template operator()<int64_t>();
break;
case proto::VarType::BOOL:
visitor.template operator()<bool>();
break;
default:
// PADDLE_THROW("Not supported");
printf("Not supported");
}
}
inline std::string DataTypeToString(const proto::VarType::Type type) {
switch (type) {
case proto::VarType::FP16:
return "float16";
case proto::VarType::FP32:
return "float32";
case proto::VarType::FP64:
return "float64";
case proto::VarType::INT16:
return "int16";
case proto::VarType::INT32:
return "int32";
case proto::VarType::INT64:
return "int64";
case proto::VarType::BOOL:
return "bool";
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
case proto::VarType::FP16:
return "float16";
case proto::VarType::FP32:
return "float32";
case proto::VarType::FP64:
return "float64";
case proto::VarType::INT16:
return "int16";
case proto::VarType::INT32:
return "int32";
case proto::VarType::INT64:
return "int64";
case proto::VarType::BOOL:
return "bool";
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
}
}
......@@ -120,5 +120,5 @@ inline std::ostream &operator<<(std::ostream &out,
return out;
}
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
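VisitDataType bridges the runtime proto type to compile-time templates by instantiating the functor's templated operator() in the matching case, the same pattern DeserializedDataFunctor relies on in tensor_util.cc. A toy visitor sketch:

#include <cstdio>

struct SizePrinter {
  template <typename T> void operator()() {
    std::printf("element size: %zu\n", sizeof(T));
  }
};

// Instantiates SizePrinter::operator()<float>() at the FP32 case:
paddle_mobile::framework::VisitDataType(
    paddle_mobile::framework::proto::VarType::FP32, SizePrinter());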
......@@ -16,10 +16,10 @@ limitations under the License. */
// Disable the copy and assignment operator for a class.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname&) = delete; \
classname(classname&&) = delete; \
classname& operator=(const classname&) = delete; \
classname& operator=(classname&&) = delete
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname &) = delete; \
classname(classname &&) = delete; \
classname &operator=(const classname &) = delete; \
classname &operator=(classname &&) = delete
#endif
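The macro deletes all four copy/move members and deliberately leaves the class in a private: section, so a public: label must follow it. Usage on a hypothetical class:

class HypotheticalScope {
  DISABLE_COPY_AND_ASSIGN(HypotheticalScope); // leaves us in a private: section

 public:
  HypotheticalScope() = default;
};

// HypotheticalScope b = a;  // compile error: the copy constructor is deleted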