diff --git a/CMakeLists.txt b/CMakeLists.txt
index dad11ad655119418dcb5c52b1476f4cc81a628e1..32e909e1cce001dff49a90d7753be780e14f08ea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 3.0)
 project(paddle-mobile)
-add_definitions(-DPADDLE_MOBILE_DEBUG)
-add_definitions(-DENABLE_EXCEPTION)
+#add_definitions(-DPADDLE_MOBILE_DEBUG)
+#add_definitions(-DENABLE_EXCEPTION)
 
 if(IS_MAC)
   add_definitions(-DX86)
@@ -15,7 +15,8 @@ else ()
   add_definitions(-DX86)
 endif()
 
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
 set(CMAKE_BUILD_TYPE Release)
 set(CMAKE_VERBOSE_MAKEFILE ON)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
diff --git a/src/common/variant.h b/src/common/variant.h
index e9fd307e8360156edd2b9a0a0517bb7517a3b362..7fbf0ec0772f102165770dc9c8e053f469965f10 100644
--- a/src/common/variant.h
+++ b/src/common/variant.h
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#include "common/enforce.h"
 #include "common/log.h"
 #pragma once
 
@@ -55,15 +56,11 @@ class RawData {
   char data[size];
   RawData() {}
   RawData(const RawData &raw_data) { strcpy(data, raw_data.data); }
-  // void operator=(const RawData &raw_data){
-  //   strcpy(data, raw_data.data);
-  // }
 };
 
 template <typename... Ts>
 struct Variant {
   Variant(const Variant &variant) {
-    // std::cout << " copy constructor " << std::endl;
    type_id = variant.type_id;
    data = variant.data;
  }
@@ -85,8 +82,7 @@ struct Variant {
     if (type_id == typeid(T).hash_code()) {
       return *const_cast<T *>(reinterpret_cast<const T *>(&data));
     } else {
-      // std::cout << " bad cast in variant " << std::endl;
-      throw std::bad_cast();
+      PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant ");
     }
   }
diff --git a/src/framework/attribute.h b/src/framework/attribute.h
index 3aac834f7759a6e064060d93b2066beac9231b45..3b6608cf03e7f786ad8c087dc869516cb6220edb 100644
--- a/src/framework/attribute.h
+++ b/src/framework/attribute.h
@@ -127,7 +127,7 @@ class Attribute {
     } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
       return vistor(attr.variant_.Get<int64_t>());
     } else {
-      throw std::bad_exception();
+      PADDLE_MOBILE_THROW_EXCEPTION("type not supported");
     }
   }
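Note on the variant.h change above: the class is a small hand-rolled tagged union, a fixed-size POD buffer (`RawData`) plus the `typeid(T).hash_code()` of the stored type, which `Get<T>()` verifies before casting; this PR only swaps the failure path from `throw std::bad_cast()` to `PADDLE_MOBILE_THROW_EXCEPTION`. A minimal standalone sketch of that pattern, with illustrative names (`MiniVariant` is not the project's declaration):

```cpp
#include <cstddef>
#include <cstring>
#include <typeinfo>

template <int size>
struct RawData {
  char data[size];
};

template <typename... Ts>
struct MiniVariant {
  template <typename T>
  void Set(T value) {
    // POD-style storage: only safe for trivially copyable types.
    static_assert(sizeof(T) <= sizeof(data), "type too large for buffer");
    std::memcpy(data.data, &value, sizeof(T));
    type_id = typeid(T).hash_code();
  }

  template <typename T>
  T &Get() const {
    if (type_id != typeid(T).hash_code()) {
      throw std::bad_cast();  // the PR routes this through the new macro
    }
    return *const_cast<T *>(reinterpret_cast<const T *>(&data));
  }

  std::size_t type_id = 0;
  RawData<64> data;
};
```

Typical use is `v.Set(3); v.Get<int>();`, with `Get` failing loudly on a type mismatch rather than reinterpreting garbage.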
diff --git a/src/framework/data_layout.h b/src/framework/data_layout.h
index 9944c88c8fefa9183445b93b3b703a5999d1b682..cb7bad66e3b3c4263a84b2c6f3beae0e82cec9cb 100644
--- a/src/framework/data_layout.h
+++ b/src/framework/data_layout.h
@@ -15,7 +15,6 @@ limitations under the License. */
 #pragma once
 
 #include <cctype>
-#include <iostream>
 #include <string>
 
 namespace paddle_mobile {
@@ -40,7 +39,7 @@ inline DataLayout StringToDataLayout(const std::string &str) {
   } else if (s == "ANYLAYOUT") {
     return DataLayout::kAnyLayout;
   } else {
-    // std::cout << "Unknown storage order string: %s", s;
+    PADDLE_MOBILE_THROW_EXCEPTION("Unknown storage order string: %s", s)
   }
 }
diff --git a/src/framework/ddim.cpp b/src/framework/ddim.cpp
index 4fa01564d5f8cc8b2521903ff310b035d39a635e..925287ebf63562914ce9bc87c8f8120f5d84bee8 100644
--- a/src/framework/ddim.cpp
+++ b/src/framework/ddim.cpp
@@ -63,9 +63,6 @@ void make_ddim(DDim &ddim, const int64_t *dims, int n) {
       ddim = make_dim<9>(dims);
       break;
     default:
-      // std::cout << "Dynamic dimensions must have between [1,
-      // 9]
-      // dimensions.";
       break;
   }
 }
@@ -133,9 +130,6 @@ int64_t DDim::operator[](int idx) const {
 int DDim::size() const { return arity(*this); }
 
 bool DDim::operator==(DDim d) const {
-  // if (var.which() != d.getVar().which()) {
-  //   return false;
-  // } else {
   std::vector<int64_t> v1 = vectorize(*this);
   std::vector<int64_t> v2 = vectorize(d);
 
@@ -157,7 +151,7 @@ DDim DDim::operator+(DDim d) const {
 
   std::vector<int64_t> v3;
 
-  assert(v1.size() == v2.size());
+  PADDLE_MOBILE_ENFORCE(v1.size() == v2.size(), "v1.size() != v2.size()");
 
   for (unsigned int i = 0; i < v1.size(); i++) {
     v3.push_back(v1[i] + v2[i]);
@@ -172,7 +166,7 @@ DDim DDim::operator*(DDim d) const {
 
   std::vector<int64_t> v3;
 
-  assert(v1.size() == v2.size());
+  PADDLE_MOBILE_ENFORCE(v1.size() == v2.size(), "v1.size() != v2.size()");
 
   for (unsigned int i = 0; i < v1.size(); i++) {
     v3.push_back(v1[i] * v2[i]);
@@ -235,13 +229,10 @@ struct SliceVectorizeVisitor : Vistor<void> {
 
   SliceVectorizeVisitor(std::vector<int64_t> &v, int b, int e)
       : vector(v), begin(b), end(e) {
-    // PADDLE_ENFORCE(begin < end,
-    //                "Begin index must be less than end index in
-    //                ddim
-    //                slice.");
-    // PADDLE_ENFORCE(begin >= 0,
-    //                "Begin index can't be less than zero in
-    //                ddim slice.");
+    PADDLE_MOBILE_ENFORCE(
+        begin < end, "Begin index must be less than end index in ddim slice.");
+    PADDLE_MOBILE_ENFORCE(begin >= 0,
+                          "Begin index can't be less than zero in ddim slice.");
   }
 
   template <int S>
@@ -267,9 +258,7 @@ DDim slice_ddim(const DDim &ddim, int begin, int end) {
   std::vector<int64_t> vec;
   vec.reserve(end - begin);
   SliceVectorizeVisitor visitor(vec, begin, end);
-  // boost::apply_visitor(visitor, dim);
   DDim::ApplyVistor(visitor, ddim);
-  // visitor(ddim.var.Get<Dim<4>>());
   return make_ddim(vec);
 }
@@ -287,31 +276,40 @@ struct ArityVisitor : Vistor<int> {
 
 int arity(const DDim &d) {
   ArityVisitor arityVisitor = ArityVisitor();
   return DDim::ApplyVistor(arityVisitor, d);
-  // return arityVisitor(d.var.Get<Dim<4>>());
-  // return boost::apply_visitor(ArityVisitor(), d);
 }
 }
-/// \cond HIDDEN
-
-/// \endcond
+  /// \cond HIDDEN
 
-struct OSVistor : Vistor<std::ostream &> {
-  OSVistor(std::ostream &os) : os_(os) {}
+  /// \endcond
 
-  template <int D>
-  std::ostream &operator()(Dim<D> dim) const {
-    return os_ << dim;
+  // struct OSVistor : Vistor<std::ostream &> {
+  //   OSVistor(std::ostream &os) : os_(os) {}
+  //
+  //   template <int D>
+  //   std::ostream &operator()(Dim<D> dim) const {
+  //     return os_ << dim;
+  //   }
+  //
+  //  private:
+  //   std::ostream &os_;
+  // };
+
+  // std::ostream &operator<<(std::ostream &os, const DDim &ddim) {
+  //   auto vistor = OSVistor(os);
+  //   DDim::ApplyVistor(vistor, ddim);
+  //   return os;
+  // }
+
+#ifdef PADDLE_MOBILE_DEBUG
+inline Print &operator<<(Print &printer, const DDim &ddim) {
+  for (int j = 0; j < ddim.size(); ++j) {
+    printer << ddim[j] << " ";
   }
 
- private:
-  std::ostream &os_;
-};
-
-std::ostream &operator<<(std::ostream &os, const DDim &ddim) {
-  auto vistor = OSVistor(os);
-  DDim::ApplyVistor(vistor, ddim);
-  return os;
+  return printer;
 }
+#endif
+
 DDim::DDim(std::initializer_list<int64_t> init_list) {
   *this = make_ddim(init_list);
 }
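All of the new error paths above funnel through `PADDLE_MOBILE_ENFORCE` and `PADDLE_MOBILE_THROW_EXCEPTION` from `common/enforce.h`, which this diff does not show. A hypothetical sketch of what such a macro pair typically looks like (names, exception type, and buffer size are assumptions, not the project's real definitions):

```cpp
#include <cstdio>
#include <exception>
#include <string>
#include <utility>

// Hypothetical exception type; the real one lives in common/enforce.h.
struct MobileException : std::exception {
  std::string message;
  explicit MobileException(std::string msg) : message(std::move(msg)) {}
  const char *what() const noexcept override { return message.c_str(); }
};

// printf-style formatting into a fixed buffer, then throw.
#define MOBILE_THROW_EXCEPTION(...)                      \
  {                                                      \
    char buffer[512];                                    \
    std::snprintf(buffer, sizeof(buffer), __VA_ARGS__);  \
    throw MobileException(buffer);                       \
  }

// Assert-like guard: throw with the given message when the condition fails.
#define MOBILE_ENFORCE(condition, ...)                   \
  do {                                                   \
    if (!(condition)) {                                  \
      MOBILE_THROW_EXCEPTION(__VA_ARGS__)                \
    }                                                    \
  } while (0)
```

One thing worth double-checking in review: the data_layout.h hunk passes a `std::string` (`s`) to a printf-style `%s`; if the real macro formats with `vsnprintf`, the call would need `s.c_str()`.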
diff --git a/src/framework/ddim.h b/src/framework/ddim.h
index 682abd300d61d325d6d248747ad5b3b832197a84..01317c583fe89f2b5234ce36248c9ed529aa2a33 100644
--- a/src/framework/ddim.h
+++ b/src/framework/ddim.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 
 #include <initializer_list>
-#include <stdexcept>
 #include <vector>
+#include "common/enforce.h"
 #include "common/variant.h"
 #include "dim.h"
@@ -57,9 +57,7 @@ struct DDim {
     } else if (d.var.TypeId() == typeid(Dim<9>).hash_code()) {
       return vistor(d.var.Get<Dim<9>>());
     } else {
-      printf(" dim not support \n");
-      throw std::bad_exception();
-      // return typename Vistor::type_t();
+      DLOG << " dim not supported";
     }
   }
@@ -139,8 +137,6 @@ DDim slice_ddim(const DDim &dim, int begin, int end);
 
 int arity(const DDim &ddim);
 
-std::ostream &operator<<(std::ostream &, const DDim &);
-
 // Reshape a tensor to a matrix. The matrix's first dimension(column
 // length)
 // will be the product of tensor's first `num_col_dims` dimensions.
diff --git a/src/framework/dim.h b/src/framework/dim.h
index 9022bb5a1f956dfa8342dafb93597c54086c723e..38e62df99519c3e869dc0fd2ae71beed28370122 100644
--- a/src/framework/dim.h
+++ b/src/framework/dim.h
@@ -14,10 +14,7 @@ limitations under the License. */
 
 #pragma once
 
-#include <iostream>
-#include <sstream>
-#include <stdexcept>
-
+#include "common/enforce.h"
 namespace paddle_mobile {
 namespace framework {
@@ -71,13 +68,9 @@ struct Dim<0> {
   Dim() {}
 
   Dim(int idx, const Dim<0> &size) {
-#ifndef __CUDA_ARCH__
     if (idx > 0) {
-      throw std::invalid_argument("Index out of range.");
+      PADDLE_MOBILE_THROW_EXCEPTION("Index out of range.")
     }
-#else
-    PADDLE_ASSERT(idx == 0);
-#endif
   }
 
   bool operator==(const Dim<0> &o) const { return true; }
@@ -123,13 +116,10 @@ struct DimGetter<0> {
 
 template <int D>
 int64_t &indexer(Dim<D> &dim, int idx) {
-#ifndef __CUDA_ARCH__
   if (idx < 0) {
-    throw std::invalid_argument("Tried to access a negative dimension");
+    PADDLE_MOBILE_THROW_EXCEPTION("Tried to access a negative dimension")
   }
-#else
-  PADDLE_ASSERT(idx >= 0);
-#endif
+
   if (idx == 0) {
     return dim.head;
   }
@@ -138,30 +128,14 @@ int64_t &indexer(Dim<D> &dim, int idx) {
 
 template <>
 int64_t &indexer<0>(Dim<0> &dim, int idx) {
-#ifndef __CUDA_ARCH__
-  throw std::invalid_argument("Invalid index");
-#else
-  PADDLE_ASSERT(false);
-#if CUDA_VERSION < 8000
-  // On CUDA versions previous to 8.0, only __shared__ variables
-  // could be declared as static in the device code.
-  int64_t head = 0;
-#else
-  static int64_t head = 0;
-#endif
-  return head;
-#endif
+  PADDLE_MOBILE_THROW_EXCEPTION("Invalid index")
 }
 
 template <int D>
 int64_t indexer(const Dim<D> &dim, int idx) {
-#ifndef __CUDA_ARCH__
   if (idx < 0) {
-    throw std::invalid_argument("Tried to access a negative dimension");
+    PADDLE_MOBILE_THROW_EXCEPTION("Tried to access a negative dimension")
   }
-#else
-  PADDLE_ASSERT(idx >= 0);
-#endif
   if (idx == 0) {
     return dim.head;
   }
@@ -170,19 +144,7 @@ int64_t indexer(const Dim<D> &dim, int idx) {
 
 template <>
 int64_t indexer<0>(const Dim<0> &dim, int idx) {
-#ifndef __CUDA_ARCH__
-  throw std::invalid_argument("Invalid index");
-#else
-  PADDLE_ASSERT(false);
-#if CUDA_VERSION < 8000
-  // On CUDA versions previous to 8.0, only __shared__ variables
-  // could be declared as static in the device code.
-  int64_t head = 0;
-#else
-  static int64_t head = 0;
-#endif
-  return head;
-#endif
+  PADDLE_MOBILE_THROW_EXCEPTION("Invalid index")
 }
 }  // namespace
 
@@ -363,50 +325,5 @@ Dim<sizeof...(Args)> make_dim(Args... idxes) {
   return Dim<sizeof...(Args)>(idxes...);
 }
 
-// Allows us to output a Dim
-// XXX For some reason, overloading fails to resolve this correctly
-template <int i>
-typename std::enable_if<(i > 1), std::ostream &>::type operator<<(
-    std::ostream &os, const Dim<i> &d) {
-  os << d.head << ", " << d.tail;
-  return os;
-}
-
-// Base case that allows us to output a Dim
-// XXX I wish this could be an overload instead of a template
-template <int i>
-typename std::enable_if<(i == 1), std::ostream &>::type operator<<(
-    std::ostream &os, const Dim<i> &d) {
-  os << d.head;
-  return os;
-}
-
-inline std::ostream &operator<<(std::ostream &os, const Dim<0> &d) {
-  return os;
-}
-
-template <int i>
-std::string Dim<i>::to_string() const {
-  std::stringstream stream;
-
-  stream << *this;
-
-  return stream.str();
-}
-
-template <int D>
-Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
-  Dim<D> result;
-
-  for (int i = 0; i < D - 1; ++i) {
-    result[i] = linear_index % extents[i];
-    linear_index /= extents[i];
-  }
-
-  result[D - 1] = linear_index;
-
-  return result;
-}
-
 }  // namespace framework
 }  // namespace paddle_mobile
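The dim.h edits keep the compile-time recursive layout intact: `Dim<D>` is a head `int64_t` plus a `Dim<D-1>` tail, and `indexer` peels one level per call until the `Dim<0>` specialization, which is now the single out-of-range failure point. A simplified sketch of that recursion, reduced from the real header (`MiniDim` and `get` are illustrative names):

```cpp
#include <cstdint>
#include <stdexcept>

template <int D>
struct MiniDim {
  int64_t head;
  MiniDim<D - 1> tail;  // recursion: D ints stored as head + (D-1)-tail
};

template <>
struct MiniDim<0> {};  // base case: no storage at all

template <int D>
int64_t get(const MiniDim<D> &dim, int idx) {
  if (idx < 0) throw std::invalid_argument("negative dimension");
  // Walk the tail until idx reaches 0.
  return idx == 0 ? dim.head : get(dim.tail, idx - 1);
}

template <>
int64_t get<0>(const MiniDim<0> & /*dim*/, int /*idx*/) {
  // Recursion bottoms out here; any index into Dim<0> is out of range,
  // which the PR now reports via PADDLE_MOBILE_THROW_EXCEPTION.
  throw std::invalid_argument("index out of range");
}

int main() {
  MiniDim<2> d{3, {4, {}}};
  return static_cast<int>(get(d, 1));  // returns 4
}
```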
diff --git a/src/framework/lod_tensor.cpp b/src/framework/lod_tensor.cpp
index dc0b77f32e0bea4e901c9661a0ec9e7877ce3d5a..0a57d29a0c05c009299d43b3b9f5a59b2c3dc341 100644
--- a/src/framework/lod_tensor.cpp
+++ b/src/framework/lod_tensor.cpp
@@ -13,53 +13,49 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "lod_tensor.h"
-#include <stdint.h>
-#include <string.h>
-#include <algorithm>
-#include <iterator>
 
 namespace paddle_mobile {
 namespace framework {
 
-std::ostream &operator<<(std::ostream &os, const LoD &lod) {
-  os << "{";
-  for (auto &v : lod) {
-    os << "{";
-    bool is_first = true;
-    for (auto &i : v) {
-      if (is_first) {
-        os << i;
-        is_first = false;
-      } else {
-        os << ", " << i;
-      }
-    }
-    os << "}";
-  }
-  os << "}";
-
-  return os;
-}
-
-std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
-                        "t.type() is not float");
-  os << "dim: " << t.dims() << "\n";
-  os << "lod: " << t.lod() << "\n";
-  // only print first ten elements
-  int64_t size = t.numel() < 10 ? t.numel() : 10;
-  for (int64_t i = 0; i < size; ++i) {
-    os << t.data<float>()[i] << " ";
-  }
-
-  return os;
-}
-
-std::string LoDToString(const LoD &lod) {
-  std::ostringstream stream;
-  stream << lod;
-  return stream.str();
-}
+// std::ostream &operator<<(std::ostream &os, const LoD &lod) {
+//   os << "{";
+//   for (auto &v : lod) {
+//     os << "{";
+//     bool is_first = true;
+//     for (auto &i : v) {
+//       if (is_first) {
+//         os << i;
+//         is_first = false;
+//       } else {
+//         os << ", " << i;
+//       }
+//     }
+//     os << "}";
+//   }
+//   os << "}";
+//
+//   return os;
+// }
+//
+// std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
+//   PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
+//                         "t.type() is not float");
+//   os << "dim: " << t.dims() << "\n";
+//   os << "lod: " << t.lod() << "\n";
+//   // only print first ten elements
+//   int64_t size = t.numel() < 10 ? t.numel() : 10;
+//   for (int64_t i = 0; i < size; ++i) {
+//     os << t.data<float>()[i] << " ";
+//   }
+//
+//   return os;
+// }
+
+// std::string LoDToString(const LoD &lod) {
+//   std::ostringstream stream;
+//   stream << lod;
+//   return stream.str();
+// }
 
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
@@ -139,7 +135,7 @@ bool CheckLoD(const LoD &in, int tensor_height) {
                       if (a < b) return true;
                       return false;
                     })) {
-      std::cout << "ascending error";
+      PADDLE_MOBILE_THROW_EXCEPTION("ascending error")
       return false;
     }
   }
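For readers new to LoD tensors: the `LoD` that `SliceInLevel` and `CheckLoD` manipulate is a list of offset levels describing variable-length sequences packed into one tensor. A small illustration, assuming the usual `std::vector<std::vector<size_t>>` definition (the alias itself is not restated in this diff):

```cpp
#include <cstddef>
#include <vector>

// Assumed alias; paddle-mobile's actual definition lives in lod_tensor.h.
using LoD = std::vector<std::vector<std::size_t>>;

int main() {
  // One LoD level describing three sequences of lengths 2, 3, and 1 packed
  // into a 6-row tensor: offsets are cumulative and must be ascending,
  // which is exactly the CheckLoD test this diff touches (it would reject
  // e.g. {{0, 5, 2}}).
  LoD lod = {{0, 2, 5, 6}};

  std::size_t first_seq_begin = lod[0][0];  // 0
  std::size_t first_seq_end = lod[0][1];    // 2
  return static_cast<int>(first_seq_end - first_seq_begin);
}
```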
diff --git a/src/framework/program/program-optimize/node.cpp b/src/framework/program/program-optimize/node.cpp
index 131fb2a19caa2e4f537fca70d1bd5322b8c6223a..d128d11edbcac4d1fa916feb7348970abc50147a 100644
--- a/src/framework/program/program-optimize/node.cpp
+++ b/src/framework/program/program-optimize/node.cpp
@@ -144,7 +144,7 @@ void Node::Folder(
       }
     }
   }
-
+#ifdef PADDLE_MOBILE_DEBUG
 std::string Node::ToString(std::string blank, const Node *node) const {
   std::stringstream ss;
   ss << type_ << "-> \n";
@@ -175,6 +175,7 @@ Print &operator<<(Print &printer, const Node &node) {
   printer << node.ToString();
   return printer;
 }
+#endif
 
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/framework/program/program-optimize/node.h b/src/framework/program/program-optimize/node.h
index 4a16f0ac8002ab58c0c549f9c85ede7d49ec30dc..66e855a9302abbbf35f58eabde7ee4554369b305 100644
--- a/src/framework/program/program-optimize/node.h
+++ b/src/framework/program/program-optimize/node.h
@@ -17,7 +17,6 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <vector>
-
 #include "common/log.h"
 #include "framework/program/op_desc.h"
 
@@ -34,7 +33,11 @@ class Node {
       : op_desc_(op_desc), type_(op_desc->Type()) {}
   Node &operator>(std::shared_ptr<Node> node);
   bool operator==(const Node &in);
+
+#ifdef PADDLE_MOBILE_DEBUG
   std::string ToString() const;
+  void Description();
+#endif
   std::shared_ptr<Node> To(int size);
   uint Depth(uint begin = 0);
   Node &Folder(
@@ -44,7 +47,6 @@ class Node {
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(uint size);
   std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
   std::string Type() { return type_; }
-  void Description();
 
  private:
   void OpDescs(uint size,
@@ -56,7 +58,9 @@ class Node {
       std::map<std::string, std::pair<std::string, std::string>> *change,
       Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::shared_ptr<framework::OpDesc> op_desc_;
+#ifdef PADDLE_MOBILE_DEBUG
   std::string ToString(std::string blank, const Node *node) const;
+#endif
   std::vector<std::shared_ptr<Node>> outputs_;
   std::vector<Node *> inputs_;
   std::string type_;
diff --git a/src/framework/scope.h b/src/framework/scope.h
index 8b194654f61d7502184b45c7eb07d655b70784dc..27702b88c0c188957b46496dcc4a548a54355c14 100644
--- a/src/framework/scope.h
+++ b/src/framework/scope.h
@@ -14,17 +14,17 @@ limitations under the License. */
 
 #pragma once
 
-#include <list>           //std::list
-#include <mutex>          //std::mutex
-#include <unordered_map>  //std::unordered_map
+#include <list>
+#include <mutex>
+#include <unordered_map>
 #include "variable.h"
 
 namespace paddle_mobile {
 namespace framework {
 class Scope {
  public:
-  Scope() {}
-  ~Scope() {}
+  Scope() = default;
+  ~Scope() = default;
 
   Scope &NewScope() const;
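The node.h and node.cpp hunks follow one pattern: debug-only members are fenced with `#ifdef PADDLE_MOBILE_DEBUG`, so once CMakeLists.txt stops defining the macro the symbols do not exist at all and release code cannot silently depend on them. The same idiom in miniature, on a hypothetical class:

```cpp
#include <string>

class Graph {
 public:
  void Run();

#ifdef PADDLE_MOBILE_DEBUG
  // Compiled only when the build defines PADDLE_MOBILE_DEBUG (the definition
  // this PR comments out in CMakeLists.txt); release code that references
  // ToString() now fails to compile instead of silently linking it in.
  std::string ToString() const;
#endif
};
```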
diff --git a/src/framework/tensor_util.cpp b/src/framework/tensor_util.cpp
index 1b0cc002bfeb9ea5cebdd4efd6c2da77cc971dfe..465502cb19173e26361905752e76e75c15229893 100644
--- a/src/framework/tensor_util.cpp
+++ b/src/framework/tensor_util.cpp
@@ -13,89 +13,19 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "tensor_util.h"
-#include <algorithm>
-#include <limits>
-#include <vector>
 
 namespace paddle_mobile {
 namespace framework {
 
 void TensorCopy(const Tensor &src, Tensor *dst) {
-  // VLOG(3) << "TensorCopy " << src.dims() << " from " <<
-  // src.place() << " to
-  // "
-  //         << dst_place;
   src.check_memory_size();
-
   dst->Resize(src.dims());
   dst->set_layout(src.layout());
   auto src_ptr = src.data<void>();
-
   auto dst_ptr = dst->mutable_data(src.type());
-
   auto size = src.numel() * SizeOfType(src.type());
-
   memory::Copy(dst_ptr, src_ptr, size);
 }
 
-void TensorCopySync(const Tensor &src, Tensor *dst) {
-  src.check_memory_size();
-  dst->Resize(src.dims());
-  dst->set_layout(src.layout());
-  auto src_ptr = src.data<void>();
-  auto dst_ptr = dst->mutable_data(src.type());
-  auto size = src.numel() * SizeOfType(src.type());
-  memory::Copy(dst_ptr, src_ptr, size);
-}
-
-template <typename Predicate>
-struct AnyDTypeVisitor {
-  Predicate predicate_;
-  const Tensor &tensor_;
-  Tensor *out_;
-
-  AnyDTypeVisitor(Predicate predicate, const Tensor &tensor, Tensor *out)
-      : predicate_(predicate), tensor_(tensor), out_(out) {}
-
-  template <typename T>
-  void operator()() const {
-    // auto t = EigenVector<T>::Flatten(tensor_);
-    // auto o = EigenScalar<bool>::From(*out_);
-    // return any of predicate_(t) is true.
-    // o.device(*ctx_.eigen_device()) = predicate_(t).any();
-  }
-};
-
-struct ContainsNANPredicate {
-  template <typename T>
-  auto operator()(const T &eigen_vec) const
-      -> decltype(std::declval<T>().isnan()) {
-    // Cast eigen_vector to vector of bool. true if is inf.
-    return eigen_vec.isnan();
-  }
-};
-
-struct ContainsInfPredicate {
-  template <typename T>
-  auto operator()(const T &eigen_vec) const
-      -> decltype(std::declval<T>().isinf()) {
-    // Cast eigen_vector to vector of bool. true if is inf.
-    return eigen_vec.isinf();
-  }
-};
-
-struct DeserializedDataFunctor {
-  DeserializedDataFunctor(void **buf, Tensor *tensor)
-      : buf_(buf), tensor_(tensor) {}
-
-  template <typename T>
-  void operator()() {
-    *buf_ = tensor_->mutable_data<T>();
-  }
-
-  void **buf_;
-  Tensor *tensor_;
-};
-
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/framework/tensor_util.h b/src/framework/tensor_util.h
index 898482fee8d69edd8e8fce284398ed91bd86cd90..f888049b395e48b9d10cea731b092c899952e3d8 100644
--- a/src/framework/tensor_util.h
+++ b/src/framework/tensor_util.h
@@ -21,44 +21,6 @@ namespace paddle_mobile {
 namespace framework {
 
 void TensorCopy(const Tensor &src, Tensor *dst);
-void TensorCopySync(const Tensor &src, Tensor *dst);
-
-template <typename T>
-void TensorFromVector(const std::vector<T> &src, Tensor *dst);
-
-template <typename T>
-void TesnorToVector(const Tensor &src, std::vector<T> *dst);
-
-bool TensorContainsNAN(const framework::Tensor &tensor);
-bool TensorContainsInf(const framework::Tensor &tensor);
-
-void TensorToStream(std::ostream &os, const Tensor &tensor);
-void TensorFromStream(std::istream &is, Tensor *tensor);
-
-//
-// The implementation of template functions.
-//
-
-template <typename T>
-void TensorFromVector(const std::vector<T> &src, Tensor *dst) {
-  auto src_ptr = static_cast<const void *>(src.data());
-  dst->Resize({static_cast<int64_t>(src.size())});
-  auto dst_ptr = static_cast<void *>(dst->mutable_data<T>());
-  auto size = src.size() * sizeof(T);
-
-  memory::Copy(dst_ptr, src_ptr, size);
-}
-
-template <typename T>
-void TensorToVector(const Tensor &src, std::vector<T> *dst) {
-  auto src_ptr = static_cast<const void *>(src.data<T>());
-  auto size = src.numel() * sizeof(T);
-
-  dst->resize(src.numel());
-  auto dst_ptr = static_cast<void *>(dst->data());
-
-  memory::Copy(dst_ptr, src_ptr, size);
-}
 
 }  // namespace framework
 }  // namespace paddle_mobile
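What remains of tensor_util after this diff is the type-erased copy: resize the destination, mirror the layout, then move `numel() * SizeOfType(type)` raw bytes. A sketch of that recipe; the `memory::Copy(dst, src, num)` argument order is taken from the calls visible above, while the names and memcpy backend are assumptions:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

namespace memory {
// Assumed backend: the diff only shows the call site, so a plain memcpy
// stands in for the project's real memory::Copy.
inline void Copy(void *dst, const void *src, std::size_t num) {
  std::memcpy(dst, src, num);
}
}  // namespace memory

// Hypothetical helper mirroring the surviving TensorCopy's core step:
// copy numel * element_size raw bytes, independent of the element type.
inline void RawTensorCopy(void *dst_ptr, const void *src_ptr, int64_t numel,
                          std::size_t element_size) {
  memory::Copy(dst_ptr, src_ptr,
               static_cast<std::size_t>(numel) * element_size);
}
```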
-//
-
-template <typename T>
-void TensorFromVector(const std::vector<T> &src, Tensor *dst) {
-  auto src_ptr = static_cast<const void *>(src.data());
-  dst->Resize({static_cast<int64_t>(src.size())});
-  auto dst_ptr = static_cast<void *>(dst->mutable_data<T>());
-  auto size = src.size() * sizeof(T);
-
-  memory::Copy(dst_ptr, src_ptr, size);
-}
-
-template <typename T>
-void TensorToVector(const Tensor &src, std::vector<T> *dst) {
-  auto src_ptr = static_cast<const void *>(src.data<T>());
-  auto size = src.numel() * sizeof(T);
-
-  dst->resize(src.numel());
-  auto dst_ptr = static_cast<void *>(dst->data());
-
-  memory::Copy(dst_ptr, src_ptr, size);
-}
diff --git a/src/operators/kernel/arm/batchnorm_kernel.cpp b/src/operators/kernel/arm/batchnorm_kernel.cpp
index e441e6cf3816ee5a5d21b5fcd1d1dc02d59ae39d..4327b7f3163f013f270ca4428227075e4883f96c 100644
--- a/src/operators/kernel/arm/batchnorm_kernel.cpp
+++ b/src/operators/kernel/arm/batchnorm_kernel.cpp
@@ -49,7 +49,7 @@ void BatchNormKernel<CPU, float>::Compute(const BatchNormParam &param) const {
   Tensor inv_std;
   auto inv_std_ptr = inv_std.mutable_data<float>(make_ddim({C}));
   if (C != variance->numel()) {
-    std::cout << "C must equal to variance.numel()" << std::endl;
+    DLOG << "C must equal to variance.numel()";
   }
   assert(C == variance->numel());
diff --git a/tools/build.sh b/tools/build.sh
index e7869bb890429dd455fe25539974b4bc639a0d81..6a0b7b57929352dce3a26a1f3bd9be6e9719d911 100755
--- a/tools/build.sh
+++ b/tools/build.sh
@@ -46,7 +46,7 @@ build_for_android() {
     if [ "${PLATFORM}" = "arm-v7a" ]; then
         ABI="armeabi-v7a with NEON"
        ARM_PLATFORM="V7"
-        CXX_FLAGS="-O3 -std=c++11 -s -march=armv7-a -mfpu=neon -mfloat-abi=softfp -pie -fPIE -w -Wno-error=format-security -llog"
+        CXX_FLAGS="-O3 -std=c++11 -s -march=armv7-a -mfpu=neon -mfloat-abi=softfp -pie -fPIE -w -Wno-error=format-security -fno-exceptions"
    elif [ "${PLATFORM}" = "arm-v8a" ]; then
        ABI="arm64-v8a"
        ARM_PLATFORM="V8"
@@ -62,29 +62,30 @@ build_for_android() {
     TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
     ANDROID_ARM_MODE="arm"
     if [ $# -eq 1 ]; then
-    NET=$1
-    cmake .. \
-        -B"../build/release/${PLATFORM}" \
-        -DANDROID_ABI="${ABI}" \
-        -DCMAKE_BUILD_TYPE="${MODE}" \
-        -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-        -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-        -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-        -DANDROID_STL=c++_static \
-        -DANDROID=true \
-        -D"${NET}"=true \
-        -D"${ARM_PLATFORM}"=true
+        NET=$1
+        cmake .. \
+            -B"../build/release/${PLATFORM}" \
+            -DANDROID_ABI="${ABI}" \
+            -DCMAKE_BUILD_TYPE="${MODE}" \
+            -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
+            -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
+            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
+            -DANDROID_STL=c++_static \
+            -DANDROID=true \
+            -D"${NET}=true" \
+            -D"${ARM_PLATFORM}"=true
     else
-    cmake .. \
-        -B"../build/release/${PLATFORM}" \
-        -DANDROID_ABI="${ABI}" \
-        -DCMAKE_BUILD_TYPE="${MODE}" \
-        -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-        -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-        -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-        -DANDROID_STL=c++_static \
-        -DANDROID=true \
-        -D"${ARM_PLATFORM}"=true
+
+        cmake .. \
+            -B"../build/release/${PLATFORM}" \
+            -DANDROID_ABI="${ABI}" \
+            -DCMAKE_BUILD_TYPE="${MODE}" \
+            -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
+            -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
+            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
+            -DANDROID_STL=c++_static \
+            -DANDROID=true \
+            -D"${ARM_PLATFORM}"=true
     fi
     cd "../build/release/${PLATFORM}"
     make -j 8
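A closing observation tying the build changes together: CMakeLists.txt stops defining `ENABLE_EXCEPTION` and the arm-v7a flags gain `-fno-exceptions`, under which any `throw` no longer even compiles. An exception-raising macro therefore needs a non-throwing fallback on that target; a hypothetical two-mode version (the project's real behavior lives in common/enforce.h, which this diff does not show):

```cpp
#include <cstdio>
#include <cstdlib>
#include <stdexcept>

#ifdef ENABLE_EXCEPTION
// Exceptions available: raise an error the caller can catch.
#define MOBILE_THROW(msg) throw std::runtime_error(msg)
#else
// Built with -fno-exceptions: report and terminate instead of throwing.
#define MOBILE_THROW(msg)                  \
  {                                        \
    std::fprintf(stderr, "%s\n", (msg));   \
    std::abort();                          \
  }
#endif
```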