Commit 027df0cd authored by liuruilong

delete unused files

Parent 22462bcb
......@@ -19,8 +19,6 @@ limitations under the License. */
#include <stdexcept>
#include <type_traits>
#include "platform/hostdevice.h"
namespace paddle_mobile {
namespace framework {
......@@ -30,42 +28,35 @@ struct Dim {
static constexpr int dimensions = i;
template <typename... Args>
HOSTDEVICE Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) {
Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) {
static_assert(sizeof...(_tail) == i - 1,
"Dim initialized with the wrong number of parameters");
}
HOSTDEVICE
Dim(int64_t _head, const Dim<i - 1> &_tail) : head(_head), tail(_tail) {}
HOSTDEVICE
Dim() : head(0), tail() {}
/** Construct a Dim from a linear index and size. Uses Fortran
* order
* indexing. */
HOSTDEVICE
Dim(int64_t idx, const Dim<i> &size)
: head(idx % size.head), tail(idx / size.head, size.tail) {}
/** Construct a Dim with each dimension set to the given index */
HOSTDEVICE
Dim(int64_t idx) : head(idx), tail(idx) {}
HOSTDEVICE
bool operator==(const Dim<i> &o) const {
return (head == o.head) && (tail == o.tail);
}
HOSTDEVICE
bool operator!=(const Dim<i> &o) const { return !(*this == o); }
HOSTDEVICE
int64_t &operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
HOST std::string to_string() const;
std::string to_string() const;
int64_t head;
Dim<i - 1> tail;
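
A minimal usage sketch of the recursive Dim template and its Fortran-order (column-major) index constructor described above; the include path is an assumption and may differ in the tree.

#include <iostream>
#include "framework/dim.h"  // assumed include path for this header

using paddle_mobile::framework::Dim;

int main() {
  // A 3-D extent of 2 x 3 x 4, built head-first by the variadic constructor.
  Dim<3> extents(2, 3, 4);

  // Reconstruct the multi-index of flat offset 13 in Fortran order:
  // head = 13 % 2 = 1, then the tail is built from 13 / 2 = 6 against {3, 4}.
  Dim<3> idx(13, extents);

  std::cout << idx[0] << " " << idx[1] << " " << idx[2] << std::endl;  // 1 0 2
  return 0;
}
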
......@@ -76,13 +67,10 @@ template <>
struct Dim<0> {
static constexpr int dimensions = 0;
HOSTDEVICE
Dim(int64_t _head) {}
HOSTDEVICE
Dim() {}
HOSTDEVICE
Dim(int idx, const Dim<0> &size) {
#ifndef __CUDA_ARCH__
if (idx > 0) {
......@@ -93,15 +81,12 @@ struct Dim<0> {
#endif
}
HOSTDEVICE
bool operator==(const Dim<0> &o) const { return true; }
HOSTDEVICE
bool operator!=(const Dim<0> &o) const { return false; }
HOSTDEVICE
int64_t &operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
};
......@@ -112,12 +97,12 @@ template <int i>
struct DimGetter {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D &d) {
static int64_t impl(const D &d) {
return DimGetter<i - 1>::impl(d.tail);
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t &impl(D &d) {
static int64_t &impl(D &d) {
return DimGetter<i - 1>::impl(d.tail);
}
};
......@@ -127,18 +112,18 @@ template <>
struct DimGetter<0> {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D &d) {
static int64_t impl(const D &d) {
return d.head;
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t &impl(D &d) {
static int64_t &impl(D &d) {
return d.head;
}
};
template <int D>
HOSTDEVICE int64_t &indexer(Dim<D> &dim, int idx) {
int64_t &indexer(Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
......@@ -153,7 +138,7 @@ HOSTDEVICE int64_t &indexer(Dim<D> &dim, int idx) {
}
template <>
HOSTDEVICE int64_t &indexer<0>(Dim<0> &dim, int idx) {
int64_t &indexer<0>(Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
......@@ -170,7 +155,7 @@ HOSTDEVICE int64_t &indexer<0>(Dim<0> &dim, int idx) {
}
template <int D>
HOSTDEVICE int64_t indexer(const Dim<D> &dim, int idx) {
int64_t indexer(const Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
......@@ -185,7 +170,7 @@ HOSTDEVICE int64_t indexer(const Dim<D> &dim, int idx) {
}
template <>
HOSTDEVICE int64_t indexer<0>(const Dim<0> &dim, int idx) {
int64_t indexer<0>(const Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
......@@ -204,83 +189,83 @@ HOSTDEVICE int64_t indexer<0>(const Dim<0> &dim, int idx) {
} // namespace
// Static access to constant Dim
template <int i, int l>
HOSTDEVICE int64_t get(const Dim<l> &d) {
int64_t get(const Dim<l> &d) {
return DimGetter<i>::impl(d);
}
// Static access to mutable Dim
template <int i, int l>
HOSTDEVICE int64_t &get(Dim<l> &d) {
int64_t &get(Dim<l> &d) {
return DimGetter<i>::impl(d);
}
// Dynamic access to constant Dim
template <int l>
HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
int64_t Dim<l>::operator[](int i) const {
// std::cout << "l: " << l << std::endl;
return indexer(*this, i);
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE int64_t &Dim<l>::operator[](int i) {
int64_t &Dim<l>::operator[](int i) {
return indexer(*this, i);
}
// Dynamic access to constant Dim
inline HOSTDEVICE int64_t Dim<0>::operator[](int i) const {
inline int64_t Dim<0>::operator[](int i) const {
return indexer(*this, i);
}
// Dynamic access to mutable Dim
inline HOSTDEVICE int64_t &Dim<0>::operator[](int i) {
inline int64_t &Dim<0>::operator[](int i) {
return indexer(*this, i);
}
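
A short sketch of the two access paths above: compile-time get<i>(d) through DimGetter and run-time d[i] through indexer; include path assumed as before.

#include <cassert>
#include "framework/dim.h"  // assumed include path

using namespace paddle_mobile::framework;

void access_demo() {
  Dim<3> d(5, 6, 7);

  // Compile-time access unrolls through DimGetter<i>.
  assert(get<0>(d) == 5 && get<2>(d) == 7);
  get<1>(d) = 60;  // mutable-reference overload

  // Run-time access recurses through indexer(); an out-of-range index throws
  // std::invalid_argument on the host (__CUDA_ARCH__ not defined).
  assert(d[1] == 60);
  d[2] = 70;
}
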
// Dynamic access to constant Dim
// without std::enable_if will try to instantiate this on get<0>(d)
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l> &d,
typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l> &d,
int i) {
return d[i];
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t &>::type get(Dim<l> &d,
typename std::enable_if<(l > 0), int64_t &>::type get(Dim<l> &d,
int i) {
return d[i];
}
// Dot product of two dims
template <int i>
HOSTDEVICE int64_t linearize(const Dim<i> &a, const Dim<i> &b) {
int64_t linearize(const Dim<i> &a, const Dim<i> &b) {
return a.head * b.head + linearize(a.tail, b.tail);
}
// Base case dot product of two Dims
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t linearize(const Dim<0> &a, const Dim<0> &b) {
inline int64_t linearize(const Dim<0> &a, const Dim<0> &b) {
return 0;
}
// Product of a Dim
template <int i>
HOSTDEVICE int64_t product(const Dim<i> &a, int prod = 1) {
int64_t product(const Dim<i> &a, int prod = 1) {
return prod * a.head * product(a.tail);
}
// Base case product of a Dim
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t product(const Dim<0> &a, int prod) {
inline int64_t product(const Dim<0> &a, int prod) {
return prod;
}
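
Given the recursive definitions above, product multiplies all extents and linearize(a, b) is the dot product of an index with a stride Dim; a hedged sketch (include path assumed):

#include <cassert>
#include "framework/dim.h"  // assumed include path

using namespace paddle_mobile::framework;

void offset_demo() {
  Dim<3> extents(2, 3, 4);
  assert(product(extents) == 24);  // 2 * 3 * 4 elements

  // Column-major strides for these extents are {1, 2, 6};
  // the flat offset of index {1, 0, 2} is 1*1 + 0*2 + 2*6 = 13.
  Dim<3> strides(1, 2, 6);
  Dim<3> idx(1, 0, 2);
  assert(linearize(idx, strides) == 13);
}
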
// Is 0 <= idx_i < size_i for all i?
template <int i>
HOSTDEVICE bool contained(const Dim<i> &idx, const Dim<i> &size) {
bool contained(const Dim<i> &idx, const Dim<i> &size) {
return ((0 <= idx.head) && (idx.head < size.head) &&
contained(idx.tail, size.tail));
}
......@@ -288,7 +273,7 @@ HOSTDEVICE bool contained(const Dim<i> &idx, const Dim<i> &size) {
// Base case of is 0 <= idx_i < size_i ?
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline bool contained(const Dim<0> &idx, const Dim<0> &size) {
inline bool contained(const Dim<0> &idx, const Dim<0> &size) {
return true;
}
......@@ -296,7 +281,7 @@ HOSTDEVICE inline bool contained(const Dim<0> &idx, const Dim<0> &size) {
* \brief Compute exclusive prefix-multiply of a Dim.
*/
template <int i>
HOSTDEVICE Dim<i> ex_prefix_mul(const Dim<i> &src, int mul = 1) {
Dim<i> ex_prefix_mul(const Dim<i> &src, int mul = 1) {
return Dim<i>(mul, ex_prefix_mul(src.tail, mul * src.head));
}
......@@ -304,7 +289,7 @@ HOSTDEVICE Dim<i> ex_prefix_mul(const Dim<i> &src, int mul = 1) {
// Base case of ex_prefix_mul
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0> &src, int mul) {
inline Dim<0> ex_prefix_mul(const Dim<0> &src, int mul) {
return Dim<0>();
}
///\endcond
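
ex_prefix_mul produces the exclusive running product of the extents, which is exactly the column-major stride vector used in the sketch above; for example (same assumed include):

#include <cassert>
#include "framework/dim.h"  // assumed include path

using namespace paddle_mobile::framework;

void stride_demo() {
  Dim<3> extents(2, 3, 4);
  // {1, 1*2, 1*2*3} -> {1, 2, 6}: each entry is the product of the extents
  // before it, so it doubles as a Fortran-order stride.
  Dim<3> strides = ex_prefix_mul(extents);
  assert(strides[0] == 1 && strides[1] == 2 && strides[2] == 6);
}
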
......@@ -313,18 +298,18 @@ HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0> &src, int mul) {
* Add two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_plus(const Dim<i> &a, const Dim<i> &b) {
Dim<i> dim_plus(const Dim<i> &a, const Dim<i> &b) {
return Dim<i>(a.head + b.head, dim_plus(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_plus(const Dim<0> &a, const Dim<0> &b) {
inline Dim<0> dim_plus(const Dim<0> &a, const Dim<0> &b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator+(const Dim<i> &lhs, const Dim<i> &rhs) {
Dim<i> operator+(const Dim<i> &lhs, const Dim<i> &rhs) {
return dim_plus(lhs, rhs);
}
......@@ -332,18 +317,18 @@ HOSTDEVICE Dim<i> operator+(const Dim<i> &lhs, const Dim<i> &rhs) {
* Multiply two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_mult(const Dim<i> &a, const Dim<i> &b) {
Dim<i> dim_mult(const Dim<i> &a, const Dim<i> &b) {
return Dim<i>(a.head * b.head, dim_mult(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_mult(const Dim<0> &a, const Dim<0> &b) {
inline Dim<0> dim_mult(const Dim<0> &a, const Dim<0> &b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator*(const Dim<i> &lhs, const Dim<i> &rhs) {
Dim<i> operator*(const Dim<i> &lhs, const Dim<i> &rhs) {
return dim_mult(lhs, rhs);
}
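
The dim_plus/dim_mult pairs make + and * element-wise over two Dims of the same rank; a tiny sketch:

#include "framework/dim.h"  // assumed include path

using namespace paddle_mobile::framework;

void arithmetic_demo() {
  Dim<2> a(2, 3);
  Dim<2> b(10, 20);
  Dim<2> s = a + b;  // element-wise sum: {12, 23}
  Dim<2> p = a * b;  // element-wise product: {20, 60}
  (void)s;
  (void)p;
}
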
......@@ -358,7 +343,7 @@ HOSTDEVICE Dim<i> operator*(const Dim<i> &lhs, const Dim<i> &rhs) {
*/
template <int i>
HOSTDEVICE Dim<i> normalize_strides(const Dim<i> &size, const Dim<i> &stride) {
Dim<i> normalize_strides(const Dim<i> &size, const Dim<i> &stride) {
int norm_stride = size.head == 1 ? 0 : stride.head;
return Dim<i>(norm_stride, normalize_strides(size.tail, stride.tail));
}
......@@ -366,7 +351,7 @@ HOSTDEVICE Dim<i> normalize_strides(const Dim<i> &size, const Dim<i> &stride) {
///\cond HIDDEN
template <>
HOSTDEVICE inline Dim<0> normalize_strides(const Dim<0> &size,
inline Dim<0> normalize_strides(const Dim<0> &size,
const Dim<0> &stride) {
return Dim<0>();
}
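
normalize_strides zeroes the stride of any dimension whose extent is 1, the usual trick that lets a size-1 axis broadcast without advancing through memory; a hedged sketch:

#include <cassert>
#include "framework/dim.h"  // assumed include path

using namespace paddle_mobile::framework;

void normalize_demo() {
  Dim<3> size(2, 1, 4);
  Dim<3> stride(1, 2, 2);
  // The middle extent is 1, so its stride is forced to 0.
  Dim<3> norm = normalize_strides(size, stride);
  assert(norm[0] == 1 && norm[1] == 0 && norm[2] == 2);
}
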
......@@ -382,7 +367,7 @@ HOSTDEVICE inline Dim<0> normalize_strides(const Dim<0> &size,
*/
template <typename... Args>
HOSTDEVICE Dim<sizeof...(Args)> make_dim(Args... idxes) {
Dim<sizeof...(Args)> make_dim(Args... idxes) {
return Dim<sizeof...(Args)>(idxes...);
}
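
make_dim deduces the rank from the argument count, so callers never spell the Dim<N> type explicitly; for instance:

#include "framework/dim.h"  // assumed include path

using paddle_mobile::framework::make_dim;

void make_dim_demo() {
  // Rank is deduced from sizeof...(Args): this is a Dim<4>.
  auto d = make_dim(1, 3, 224, 224);
  static_assert(decltype(d)::dimensions == 4, "rank deduced from arguments");
}
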
......@@ -409,7 +394,7 @@ inline std::ostream &operator<<(std::ostream &os, const Dim<0> &d) {
}
template <int i>
HOST std::string Dim<i>::to_string() const {
std::string Dim<i>::to_string() const {
std::stringstream stream;
stream << *this;
......@@ -418,7 +403,7 @@ HOST std::string Dim<i>::to_string() const {
}
template <int D>
HOSTDEVICE Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
Dim<D> result;
for (int i = 0; i < D - 1; ++i) {
......
......@@ -39,9 +39,6 @@ void TensorCopy(const Tensor &src, Tensor *dst) {
}
void TensorCopySync(const Tensor &src, Tensor *dst) {
// VLOG(3) << "TensorCopySync " << src.dims() << " from " <<
// src.place()
// << " to " << dst_place;
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
......@@ -69,41 +66,6 @@ struct AnyDTypeVisitor {
}
};
template <typename Predicate>
inline void AnyImpl(Predicate predicate, const Tensor &tensor,
framework::Tensor *out) {
VisitDataType(ToDataType(tensor.type()),
AnyDTypeVisitor<Predicate>(predicate, tensor, out));
}
template <typename Predicate>
struct AnyVisitor {
const framework::Tensor &tensor_;
Predicate predicate_;
AnyVisitor(const framework::Tensor &tensor, Predicate predicate)
: tensor_(tensor), predicate_(std::move(predicate)) {}
bool operator()(void) const {
framework::Tensor out;
out.Resize({1});
out.mutable_data<bool>();
AnyImpl(predicate_, tensor_, &out);
return this->GetResult(out);
}
bool GetResult(const framework::Tensor &out) const {
return *out.data<bool>();
}
};
template <typename Predicate>
inline bool Any(const framework::Tensor &tensor, Predicate predicate) {
AnyVisitor<Predicate> visitor(tensor, predicate);
// return platform::VisitPlace(visitor);
return visitor();
}
struct ContainsNANPredicate {
template <typename T>
auto operator()(const T &eigen_vec) const
......@@ -113,11 +75,6 @@ struct ContainsNANPredicate {
}
};
bool TensorContainsNAN(const framework::Tensor &tensor) {
ContainsNANPredicate predicate;
return Any(tensor, predicate);
}
struct ContainsInfPredicate {
template <typename T>
auto operator()(const T &eigen_vec) const
......@@ -127,11 +84,6 @@ struct ContainsInfPredicate {
}
};
bool TensorContainsInf(const framework::Tensor &tensor) {
ContainsInfPredicate predicate;
return Any(tensor, predicate);
}
struct DeserializedDataFunctor {
DeserializedDataFunctor(void **buf, Tensor *tensor)
: buf_(buf), tensor_(tensor) {}
......
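
The block removed above implemented "does any element satisfy this predicate" by dispatching on the tensor's data type and reducing with Eigen's any(); the same idea on a plain buffer, as a standalone illustration rather than the deleted code:

#include <algorithm>
#include <cmath>
#include <vector>

// Standalone sketch of the predicate-and-reduce pattern that
// TensorContainsNAN/TensorContainsInf were built on (not the deleted classes).
bool buffer_contains_nan(const std::vector<float> &buf) {
  return std::any_of(buf.begin(), buf.end(),
                     [](float v) { return std::isnan(v); });
}
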
......@@ -15,7 +15,6 @@ limitations under the License. */
#pragma once
#include <vector>
#include "memory/t_malloc.h"
#include "platform/data_type.h"
#include "tensor.h"
namespace paddle_mobile {
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <typeindex>
#include "framework/program/tensor_desc.h"
namespace paddle_mobile {
namespace framework {
inline VarType_Type ToDataType(std::type_index type) {
/*if (typeid(platform::float16).hash_code() == type.hash_code()) {
return proto::VarType::FP16;
} else */
if (typeid(const float).hash_code() == type.hash_code()) {
// CPPLint complains Using C-style cast. Use
// static_cast<float>() instead
// One fix to this is to replace float with const float because
// typeid(T) == typeid(const T)
// http://en.cppreference.com/w/cpp/language/typeid
return VARTYPE_TYPE_FP32;
} else if (typeid(const double).hash_code() == type.hash_code()) {
return VARTYPE_TYPE_FP64;
} else if (typeid(const int).hash_code() == type.hash_code()) {
return VARTYPE_TYPE_INT32;
} else if (typeid(const int64_t).hash_code() == type.hash_code()) {
return VARTYPE_TYPE_INT64;
} else if (typeid(const bool).hash_code() == type.hash_code()) {
return VARTYPE_TYPE_BOOL;
} else {
// PADDLE_THROW("Not supported");
// std::cout << "Not supported";
}
}
inline std::type_index ToTypeIndex(VarType_Type type) {
switch (type) {
// case proto::VarType::FP16:
// return typeid(platform::float16);
case VARTYPE_TYPE_FP32:
return typeid(float);
case VARTYPE_TYPE_FP64:
return typeid(double);
case VARTYPE_TYPE_INT32:
return typeid(int);
case VARTYPE_TYPE_INT64:
return typeid(int64_t);
case VARTYPE_TYPE_BOOL:
return typeid(bool);
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
}
}
template <typename Visitor>
inline void VisitDataType(VarType_Type type, Visitor visitor) {
switch (type) {
// case proto::VarType::FP16:
// visitor.template operator()<platform::float16>();
// break;
case VARTYPE_TYPE_FP32:
visitor.template operator()<float>();
break;
case VARTYPE_TYPE_FP64:
visitor.template operator()<double>();
break;
case VARTYPE_TYPE_INT32:
visitor.template operator()<int>();
break;
case VARTYPE_TYPE_INT64:
visitor.template operator()<int64_t>();
break;
case VARTYPE_TYPE_BOOL:
visitor.template operator()<bool>();
break;
default:
// PADDLE_THROW("Not supported");
printf("Not supported");
}
}
inline std::string DataTypeToString(const VarType_Type type) {
switch (type) {
case VARTYPE_TYPE_FP16:
return "float16";
case VARTYPE_TYPE_FP32:
return "float32";
case VARTYPE_TYPE_FP64:
return "float64";
case VARTYPE_TYPE_INT16:
return "int16";
case VARTYPE_TYPE_INT32:
return "int32";
case VARTYPE_TYPE_INT64:
return "int64";
case VARTYPE_TYPE_BOOL:
return "bool";
default:
// PADDLE_THROW("Not support type %d", type);
printf("Not support type %d", type);
}
}
inline std::ostream &operator<<(std::ostream &out, const VarType_Type &type) {
out << DataTypeToString(type);
return out;
}
} // namespace framework
} // namespace paddle_mobile
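
A sketch of how the ToDataType/VisitDataType pair above is typically driven: map a std::type_index to a VarType_Type, then dispatch a templated visitor on it. Only functions shown in this file are used; the include path and the visitor are hypothetical.

#include <cstdio>
#include <string>
#include <typeindex>
#include "framework/data_type.h"  // assumed include path for the file above

using namespace paddle_mobile::framework;

// A visitor with a templated call operator, as VisitDataType expects.
struct PrintElementSize {
  template <typename T>
  void operator()() const {
    printf("element size: %zu bytes\n", sizeof(T));
  }
};

void visit_demo() {
  VarType_Type t = ToDataType(typeid(float));        // VARTYPE_TYPE_FP32
  std::string name = DataTypeToString(t);            // "float32"
  printf("%s\n", name.c_str());
  VisitDataType(t, PrintElementSize{});              // prints 4 for float
}
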
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef __CUDACC__
#define HOSTDEVICE __host__ __device__
#define DEVICE __device__
#define HOST __host__
#else
#define HOSTDEVICE
#define DEVICE
#define HOST
#endif
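
These macros expand to __host__ __device__ only when the translation unit is compiled by nvcc, so shared helpers can be annotated unconditionally; a small sketch (the helper function itself is hypothetical):

#include <cstdint>
#include "platform/hostdevice.h"

// Callable from host code always, and from device code when built with nvcc;
// under a plain host compiler HOSTDEVICE expands to nothing.
HOSTDEVICE inline int64_t round_up(int64_t x, int64_t align) {
  return (x + align - 1) / align * align;
}
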
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// Disable the copy and assignment operator for a class.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname &) = delete; \
classname(classname &&) = delete; \
classname &operator=(const classname &) = delete; \
classname &operator=(classname &&) = delete
#endif
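
A usage sketch for the macro above; it opens a private: section, so it is simplest to place it at the end of the class body. The class and include path are hypothetical.

#include "framework/macros.h"  // assumed include path for the file above

class Scope {
 public:
  Scope() = default;
  // Copy and move construction/assignment are all deleted by the macro.
  DISABLE_COPY_AND_ASSIGN(Scope);
};

Because the move operations are deleted as well, such a class has to be handled by pointer or reference wherever relocation would otherwise be needed.
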