Commit da45ba92 authored by liuruilong

Merge remote-tracking branch 'upstream/develop' into develop

cmake_minimum_required(VERSION 3.0)
project(paddle-mobile)
add_definitions(-DPADDLE_MOBILE_DEBUG)
add_definitions(-DENABLE_EXCEPTION)
#add_definitions(-DPADDLE_MOBILE_DEBUG)
#add_definitions(-DENABLE_EXCEPTION)
if(IS_MAC)
add_definitions(-DX86)
......@@ -15,7 +15,8 @@ else ()
add_definitions(-DX86)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
set(CMAKE_BUILD_TYPE Release)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
......
......@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "common/enforce.h"
#include "common/log.h"
#pragma once
......@@ -55,15 +56,11 @@ class RawData {
char data[size];
RawData() {}
RawData(const RawData &raw_data) { memcpy(data, raw_data.data, size); }  // copy the whole buffer; strcpy would stop at the first NUL byte
// void operator=(const RawData &raw_data){
// strcpy(data, raw_data.data);
// }
};
template <typename... Ts>
struct Variant {
Variant(const Variant &variant) {
// std::cout << " copy constructor " << std::endl;
type_id = variant.type_id;
data = variant.data;
}
......@@ -85,8 +82,7 @@ struct Variant {
if (type_id == typeid(T).hash_code()) {
return *const_cast<T *>(reinterpret_cast<const T *>(&data));
} else {
// std::cout << " bad cast in variant " << std::endl;
throw std::bad_cast();
PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant ");
}
}
......
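Every bare throw in this file is being funneled through PADDLE_MOBILE_THROW_EXCEPTION from common/enforce.h (included at the top of the hunk). The diff doesn't show the macro itself; a plausible minimal sketch, assuming printf-style formatting, presumably guarded by ENABLE_EXCEPTION so that -fno-exceptions builds can compile it out:

  #include <cstdio>
  #include <stdexcept>

  // Illustrative sketch only -- the real definition lives in common/enforce.h.
  #define PADDLE_MOBILE_THROW_EXCEPTION(...)         \
    {                                                \
      char buffer[1024];                             \
      snprintf(buffer, sizeof(buffer), __VA_ARGS__); \
      throw std::runtime_error(buffer);              \
    }

A braced expansion like this also explains why call sites in this commit appear both with and without a trailing semicolon: after a block body, the extra semicolon is just an empty statement.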
......@@ -127,7 +127,7 @@ class Attribute {
} else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
return vistor(attr.variant_.Get<int64_t>());
} else {
throw std::bad_exception();
PADDLE_MOBILE_THROW_EXCEPTION("type not support");
}
}
......
......@@ -15,7 +15,6 @@ limitations under the License. */
#pragma once
#include <cctype>
#include <iostream>
#include <string>
namespace paddle_mobile {
......@@ -40,7 +39,7 @@ inline DataLayout StringToDataLayout(const std::string &str) {
} else if (s == "ANYLAYOUT") {
return DataLayout::kAnyLayout;
} else {
// std::cout << "Unknown storage order string: %s", s;
PADDLE_MOBILE_THROW_EXCEPTION("Unknown storage order string: %s", s)
}
}
......
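The elided branches of StringToDataLayout handle "NHWC" and "NCHW" the same way, so usage looks like the following (the mapping of the first two branches is assumed from the enum names):

  // Assuming the elided branches return DataLayout::kNHWC / DataLayout::kNCHW.
  DataLayout layout = StringToDataLayout("ANYLAYOUT");  // -> DataLayout::kAnyLayout
  // Any other string now reaches the PADDLE_MOBILE_THROW_EXCEPTION branch above.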
......@@ -63,9 +63,6 @@ void make_ddim(DDim &ddim, const int64_t *dims, int n) {
ddim = make_dim<9>(dims);
break;
default:
// std::cout << "Dynamic dimensions must have between [1,
// 9]
// dimensions.";
break;
}
}
......@@ -133,9 +130,6 @@ int64_t DDim::operator[](int idx) const {
int DDim::size() const { return arity(*this); }
bool DDim::operator==(DDim d) const {
// if (var.which() != d.getVar().which()) {
// return false;
// } else {
std::vector<int64_t> v1 = vectorize(*this);
std::vector<int64_t> v2 = vectorize(d);
......@@ -157,7 +151,7 @@ DDim DDim::operator+(DDim d) const {
std::vector<int64_t> v3;
assert(v1.size() == v2.size());
PADDLE_MOBILE_ENFORCE(v1.size() == v2.size(), "v1.size() != v2.size()");
for (unsigned int i = 0; i < v1.size(); i++) {
v3.push_back(v1[i] + v2[i]);
......@@ -172,7 +166,7 @@ DDim DDim::operator*(DDim d) const {
std::vector<int64_t> v3;
assert(v1.size() == v2.size());
PADDLE_MOBILE_ENFORCE(v1.size() == v2.size(), "v1.size() != v2.size()");
for (unsigned int i = 0; i < v1.size(); i++) {
v3.push_back(v1[i] * v2[i]);
......@@ -235,13 +229,10 @@ struct SliceVectorizeVisitor : Vistor<void> {
SliceVectorizeVisitor(std::vector<int64_t> &v, int b, int e)
: vector(v), begin(b), end(e) {
// PADDLE_ENFORCE(begin < end,
// "Begin index must be less than end index in
// ddim
// slice.");
// PADDLE_ENFORCE(begin >= 0,
// "Begin index can't be less than zero in
// ddim slice.");
PADDLE_MOBILE_ENFORCE(
begin < end, "Begin index must be less than end index in ddim slice.");
PADDLE_MOBILE_ENFORCE(begin >= 0,
"Begin index can't be less than zero in ddim slice.");
}
template <int S>
......@@ -267,9 +258,7 @@ DDim slice_ddim(const DDim &ddim, int begin, int end) {
std::vector<int64_t> vec;
vec.reserve(end - begin);
SliceVectorizeVisitor visitor(vec, begin, end);
// boost::apply_visitor(visitor, dim);
DDim::ApplyVistor(visitor, ddim);
// visitor(ddim.var.Get<Dim<4>>());
return make_ddim(vec);
}
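The visitor copies axes [begin, end) into vec, so slice_ddim behaves as a half-open range over dimensions. A worked example (make_ddim over an initializer list, as used by the DDim constructor further down):

  DDim d = make_ddim({2, 3, 4, 5});
  DDim s = slice_ddim(d, 1, 3);  // keeps axes 1 and 2: {3, 4}
  // begin >= 0 and begin < end are checked by the PADDLE_MOBILE_ENFORCE calls above.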
......@@ -287,31 +276,40 @@ struct ArityVisitor : Vistor<int> {
int arity(const DDim &d) {
ArityVisitor arityVisitor = ArityVisitor();
return DDim::ApplyVistor(arityVisitor, d);
// return arityVisitor(d.var.Get<Dim<4>>());
// return boost::apply_visitor(ArityVisitor(), d); }
}
/// \cond HIDDEN
/// \endcond
/// \cond HIDDEN
struct OSVistor : Vistor<std::ostream &> {
OSVistor(std::ostream &os) : os_(os) {}
template <int D>
std::ostream &operator()(Dim<D> dim) const {
return os_ << dim;
}
private:
std::ostream &os_;
};
/// \endcond
std::ostream &operator<<(std::ostream &os, const DDim &ddim) {
auto vistor = OSVistor(os);
DDim::ApplyVistor(vistor, ddim);
return os;
}
// struct OSVistor : Vistor<std::ostream &> {
//  OSVistor(std::ostream &os) : os_(os) {}
//
//  template <int D>
//  std::ostream &operator()(Dim<D> dim) const {
//    return os_ << dim;
//  }
//
// private:
//  std::ostream &os_;
//};
// std::ostream &operator<<(std::ostream &os, const DDim &ddim) {
//  auto vistor = OSVistor(os);
//  DDim::ApplyVistor(vistor, ddim);
//  return os;
//}
#ifdef PADDLE_MOBILE_DEBUG
inline Print &operator<<(Print &printer, const DDim &ddim) {
for (int j = 0; j < ddim.size(); ++j) {
printer << ddim[j] << " ";
}
return printer;
}
#endif
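With PADDLE_MOBILE_DEBUG defined, the new Print-based operator is reached through the DLOG machinery in common/log.h (the Node operator later in this commit relies on the same Print stream):

  #ifdef PADDLE_MOBILE_DEBUG
  DDim d = make_ddim({1, 3, 224, 224});
  DLOG << "input dims: " << d;  // prints "1 3 224 224 " via the operator above
  #endif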
DDim::DDim(std::initializer_list<int64_t> init_list) {
*this = make_ddim(init_list);
}
......
......@@ -15,8 +15,8 @@ limitations under the License. */
#pragma once
#include <initializer_list>
#include <stdexcept>
#include <vector>
#include "common/enforce.h"
#include "common/variant.h"
#include "dim.h"
......@@ -57,9 +57,7 @@ struct DDim {
} else if (d.var.TypeId() == typeid(Dim<9>).hash_code()) {
return vistor(d.var.Get<Dim<9>>());
} else {
printf(" dim not support \n");
throw std::bad_exception();
// return typename Vistor::type_t();
DLOG << " dim not support";
}
}
......@@ -139,8 +137,6 @@ DDim slice_ddim(const DDim &dim, int begin, int end);
int arity(const DDim &ddim);
std::ostream &operator<<(std::ostream &, const DDim &);
// Reshape a tensor to a matrix. The matrix's first dimension (column length)
// will be the product of tensor's first `num_col_dims` dimensions.
......
......@@ -14,10 +14,7 @@ limitations under the License. */
#pragma once
#include <sstream>
#include <stdexcept>
#include <type_traits>
#include "common/enforce.h"
namespace paddle_mobile {
namespace framework {
......@@ -71,13 +68,9 @@ struct Dim<0> {
Dim() {}
Dim(int idx, const Dim<0> &size) {
#ifndef __CUDA_ARCH__
if (idx > 0) {
throw std::invalid_argument("Index out of range.");
PADDLE_MOBILE_THROW_EXCEPTION("Index out of range.")
}
#else
PADDLE_ASSERT(idx == 0);
#endif
}
bool operator==(const Dim<0> &o) const { return true; }
......@@ -123,13 +116,10 @@ struct DimGetter<0> {
template <int D>
int64_t &indexer(Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
PADDLE_MOBILE_THROW_EXCEPTION("Tried to access a negative dimension")
}
#else
PADDLE_ASSERT(idx >= 0);
#endif
if (idx == 0) {
return dim.head;
}
......@@ -138,30 +128,14 @@ int64_t &indexer(Dim<D> &dim, int idx) {
template <>
int64_t &indexer<0>(Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
PADDLE_ASSERT(false);
#if CUDA_VERSION < 8000
// On CUDA versions previous to 8.0, only __shared__ variables
// could be declared as static in the device code.
int64_t head = 0;
#else
static int64_t head = 0;
#endif
return head;
#endif
PADDLE_MOBILE_THROW_EXCEPTION("Invalid index")
}
template <int D>
int64_t indexer(const Dim<D> &dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
PADDLE_MOBILE_THROW_EXCEPTION("Tried to access a negative dimension")
}
#else
PADDLE_ASSERT(idx >= 0);
#endif
if (idx == 0) {
return dim.head;
}
......@@ -170,19 +144,7 @@ int64_t indexer(const Dim<D> &dim, int idx) {
template <>
int64_t indexer<0>(const Dim<0> &dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
PADDLE_ASSERT(false);
#if CUDA_VERSION < 8000
// On CUDA versions previous to 8.0, only __shared__ variables
// could be declared as static in the device code.
int64_t head = 0;
#else
static int64_t head = 0;
#endif
return head;
#endif
PADDLE_MOBILE_THROW_EXCEPTION("Invalid index")
}
} // namespace
......@@ -363,50 +325,5 @@ Dim<sizeof...(Args)> make_dim(Args... idxes) {
return Dim<sizeof...(Args)>(idxes...);
}
// Allows us to output a Dim
// XXX For some reason, overloading fails to resolve this correctly
template <int i>
typename std::enable_if<(i > 1), std::ostream &>::type operator<<(
std::ostream &os, const Dim<i> &d) {
os << d.head << ", " << d.tail;
return os;
}
// Base case that allows us to output a Dim
// XXX I wish this could be an overload instead of a template
template <int i>
typename std::enable_if<(i == 1), std::ostream &>::type operator<<(
std::ostream &os, const Dim<i> &d) {
os << d.head;
return os;
}
inline std::ostream &operator<<(std::ostream &os, const Dim<0> &d) {
return os;
}
template <int i>
std::string Dim<i>::to_string() const {
std::stringstream stream;
stream << *this;
return stream.str();
}
template <int D>
Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
Dim<D> result;
for (int i = 0; i < D - 1; ++i) {
result[i] = linear_index % extents[i];
linear_index /= extents[i];
}
result[D - 1] = linear_index;
return result;
}
} // namespace framework
} // namespace paddle_mobile
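linear_to_dimension above treats axis 0 as the fastest-varying one: it inverts the flattening idx = d[0] + d[1]*e[0] + d[2]*e[0]*e[1] + .... A worked check:

  auto extents = make_dim(3, 4);             // Dim<2> with e[0] = 3, e[1] = 4
  auto d = linear_to_dimension(7, extents);  // d[0] = 7 % 3 = 1, then 7 / 3 = 2 -> d[1] = 2
  // consistency: 1 + 2 * 3 == 7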
......@@ -13,53 +13,49 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "lod_tensor.h"
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>
namespace paddle_mobile {
namespace framework {
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
os << "{";
for (auto &v : lod) {
os << "{";
bool is_first = true;
for (auto &i : v) {
if (is_first) {
os << i;
is_first = false;
} else {
os << ", " << i;
}
}
os << "}";
}
os << "}";
return os;
}
std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
"t.type() is not float");
os << "dim: " << t.dims() << "\n";
os << "lod: " << t.lod() << "\n";
// only print first ten elements
int64_t size = t.numel() < 10 ? t.numel() : 10;
for (int64_t i = 0; i < size; ++i) {
os << t.data<float>()[i] << " ";
}
return os;
}
std::string LoDToString(const LoD &lod) {
std::ostringstream stream;
stream << lod;
return stream.str();
}
// std::ostream &operator<<(std::ostream &os, const LoD &lod) {
// os << "{";
// for (auto &v : lod) {
// os << "{";
// bool is_first = true;
// for (auto &i : v) {
// if (is_first) {
// os << i;
// is_first = false;
// } else {
// os << ", " << i;
// }
// }
// os << "}";
// }
// os << "}";
//
// return os;
//}
//
// std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
// PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
// "t.type() is not float");
// os << "dim: " << t.dims() << "\n";
// os << "lod: " << t.lod() << "\n";
// // only print first ten elements
// int64_t size = t.numel() < 10 ? t.numel() : 10;
// for (int64_t i = 0; i < size; ++i) {
// os << t.data<float>()[i] << " ";
// }
//
// return os;
//}
// std::string LoDToString(const LoD &lod) {
// std::ostringstream stream;
// stream << lod;
// return stream.str();
//}
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
size_t elem_end) {
......@@ -139,7 +135,7 @@ bool CheckLoD(const LoD &in, int tensor_height) {
if (a < b) return true;
return false;
})) {
std::cout << "ascending error";
PADDLE_MOBILE_THROW_EXCEPTION("ascending error")
return false;
}
}
......
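For reference, the nested-brace output of the LoD operator<< kept above (LoD is assumed to be the usual vector of offset vectors from lod_tensor.h):

  LoD lod = {{0, 2, 5}, {0, 1, 3, 5, 7, 9}};
  std::cout << lod;  // prints {{0, 2, 5}{0, 1, 3, 5, 7, 9}}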
......@@ -144,7 +144,7 @@ void Node::Folder(
}
}
}
#ifdef PADDLE_MOBILE_DEBUG
std::string Node::ToString(std::string blank, const Node *node) const {
std::stringstream ss;
ss << type_ << "-> \n";
......@@ -175,6 +175,7 @@ Print &operator<<(Print &printer, const Node &node) {
printer << node.ToString();
return printer;
}
#endif
} // namespace framework
} // namespace paddle_mobile
......@@ -17,7 +17,6 @@ limitations under the License. */
#include <map>
#include <string>
#include <vector>
#include "common/log.h"
#include "framework/program/op_desc.h"
......@@ -34,7 +33,11 @@ class Node {
: op_desc_(op_desc), type_(op_desc->Type()) {}
Node &operator>(std::shared_ptr<Node> node);
bool operator==(const Node &in);
#ifdef PADDLE_MOBILE_DEBUG
std::string ToString() const;
void Description();
#endif
std::shared_ptr<Node> To(int size);
uint Depth(uint begin = 0);
Node &Folder(
......@@ -44,7 +47,6 @@ class Node {
std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(uint size);
std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
std::string Type() { return type_; }
void Description();
private:
void OpDescs(uint size,
......@@ -56,7 +58,9 @@ class Node {
std::map<std::string, std::pair<std::string, std::string>> *change,
Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes);
std::shared_ptr<framework::OpDesc> op_desc_;
#ifdef PADDLE_MOBILE_DEBUG
std::string ToString(std::string blank, const Node *node) const;
#endif
std::vector<std::shared_ptr<Node>> outputs_;
std::vector<Node *> inputs_;
std::string type_;
......
......@@ -14,17 +14,17 @@ limitations under the License. */
#pragma once
#include <list> //std::list
#include <mutex> //std::mutex
#include <unordered_map> //std::unordered_map
#include <list>
#include <mutex>
#include <unordered_map>
#include "variable.h"
namespace paddle_mobile {
namespace framework {
class Scope {
public:
Scope() {}
~Scope() {}
Scope() = default;
~Scope() = default;
Scope &NewScope() const;
......
......@@ -13,89 +13,19 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "tensor_util.h"
#include <algorithm>
#include <limits>
#include <vector>
namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor &src, Tensor *dst) {
// VLOG(3) << "TensorCopy " << src.dims() << " from " <<
// src.place() << " to
// "
// << dst_place;
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
auto src_ptr = src.data<void>();
auto dst_ptr = dst->mutable_data(src.type());
auto size = src.numel() * SizeOfType(src.type());
memory::Copy(dst_ptr, src_ptr, size);
}
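TensorCopy is a full deep copy: resize dst, mirror the layout, then one raw memory::Copy of numel() * SizeOfType(type()) bytes. A typical call:

  Tensor src, dst;
  src.mutable_data<float>(make_ddim({2, 3}));  // allocate a 2x3 float buffer
  TensorCopy(src, &dst);                       // dst now matches dims, layout, and bytes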
void TensorCopySync(const Tensor &src, Tensor *dst) {
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
auto src_ptr = src.data<void>();
auto dst_ptr = dst->mutable_data(src.type());
auto size = src.numel() * SizeOfType(src.type());
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename Predicate>
struct AnyDTypeVisitor {
Predicate predicate_;
const Tensor &tensor_;
Tensor *out_;
AnyDTypeVisitor(Predicate predicate, const Tensor &tensor, Tensor *out)
: predicate_(predicate), tensor_(tensor), out_(out) {}
template <typename T>
void operator()() const {
// auto t = EigenVector<T>::Flatten(tensor_);
// auto o = EigenScalar<bool>::From(*out_);
// return any of predicate_(t) is true.
// o.device(*ctx_.eigen_device()) = predicate_(t).any();
}
};
struct ContainsNANPredicate {
template <typename T>
auto operator()(const T &eigen_vec) const
-> decltype(std::declval<T>().isnan()) {
// Cast eigen_vector to vector of bool. true if is inf.
return eigen_vec.isnan();
}
};
struct ContainsInfPredicate {
template <typename T>
auto operator()(const T &eigen_vec) const
-> decltype(std::declval<T>().isinf()) {
// Cast eigen_vector to vector of bool. true if is inf.
return eigen_vec.isinf();
}
};
struct DeserializedDataFunctor {
DeserializedDataFunctor(void **buf, Tensor *tensor)
: buf_(buf), tensor_(tensor) {}
template <typename T>
void operator()() {
*buf_ = tensor_->mutable_data<T>();
}
void **buf_;
Tensor *tensor_;
};
} // namespace framework
} // namespace paddle_mobile
......@@ -21,44 +21,6 @@ namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor &src, Tensor *dst);
void TensorCopySync(const Tensor &src, Tensor *dst);
template <typename T>
void TensorFromVector(const std::vector<T> &src, Tensor *dst);
template <typename T>
void TensorToVector(const Tensor &src, std::vector<T> *dst);
bool TensorContainsNAN(const framework::Tensor &tensor);
bool TensorContainsInf(const framework::Tensor &tensor);
void TensorToStream(std::ostream &os, const Tensor &tensor);
void TensorFromStream(std::istream &is, Tensor *tensor);
//
// The implementation of template functions.
//
template <typename T>
void TensorFromVector(const std::vector<T> &src, Tensor *dst) {
auto src_ptr = static_cast<const void *>(src.data());
dst->Resize({static_cast<int64_t>(src.size())});
auto dst_ptr = static_cast<void *>(dst->mutable_data<T>());
auto size = src.size() * sizeof(T);
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename T>
void TensorToVector(const Tensor &src, std::vector<T> *dst) {
auto src_ptr = static_cast<const void *>(src.data<T>());
auto size = src.numel() * sizeof(T);
dst->resize(src.numel());
auto dst_ptr = static_cast<void *>(dst->data());
memory::Copy(dst_ptr, src_ptr, size);
}
} // namespace framework
} // namespace paddle_mobile
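The two template helpers round-trip a host vector through a Tensor with plain byte copies:

  std::vector<float> in = {1.f, 2.f, 3.f};
  Tensor t;
  TensorFromVector(in, &t);  // t.dims() == {3}
  std::vector<float> out;
  TensorToVector(t, &out);   // out == in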
......@@ -49,7 +49,7 @@ void BatchNormKernel<CPU, float>::Compute(const BatchNormParam &param) const {
Tensor inv_std;
auto inv_std_ptr = inv_std.mutable_data<float>(make_ddim({C}));
if (C != variance->numel()) {
std::cout << "C must equal to variance.numel()" << std::endl;
DLOG << "C must equal to variance.numel()";
}
assert(C == variance->numel());
......
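inv_std conventionally holds 1 / sqrt(variance + epsilon) per channel; a sketch of the fill that would follow the allocation above (variance_ptr and epsilon are assumed names, taken from the usual batch-norm parameters):

  // Hypothetical names: variance_ptr = variance->data<float>(), epsilon from param.
  for (int c = 0; c < C; ++c) {
    inv_std_ptr[c] = 1.f / std::sqrt(variance_ptr[c] + epsilon);
  }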
......@@ -46,7 +46,7 @@ build_for_android() {
if [ "${PLATFORM}" = "arm-v7a" ]; then
ABI="armeabi-v7a with NEON"
ARM_PLATFORM="V7"
CXX_FLAGS="-O3 -std=c++11 -s -march=armv7-a -mfpu=neon -mfloat-abi=softfp -pie -fPIE -w -Wno-error=format-security -llog"
CXX_FLAGS="-O3 -std=c++11 -s -march=armv7-a -mfpu=neon -mfloat-abi=softfp -pie -fPIE -w -Wno-error=format-security -fno-exceptions"
elif [ "${PLATFORM}" = "arm-v8a" ]; then
ABI="arm64-v8a"
ARM_PLATFORM="V8"
......@@ -62,29 +62,30 @@ build_for_android() {
TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
ANDROID_ARM_MODE="arm"
if [ $# -eq 1 ]; then
NET=$1
cmake .. \
-B"../build/release/${PLATFORM}" \
-DANDROID_ABI="${ABI}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DANDROID_STL=c++_static \
-DANDROID=true \
-D"${NET}"=true \
-D"${ARM_PLATFORM}"=true
NET=$1
cmake .. \
-B"../build/release/${PLATFORM}" \
-DANDROID_ABI="${ABI}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DANDROID_STL=c++_static \
-DANDROID=true \
-D"${NET}=true" \
-D"${ARM_PLATFORM}"=true
else
cmake .. \
-B"../build/release/${PLATFORM}" \
-DANDROID_ABI="${ABI}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DANDROID_STL=c++_static \
-DANDROID=true \
-D"${ARM_PLATFORM}"=true
fi
cd "../build/release/${PLATFORM}"
make -j 8
......