未验证 提交 ee4f388a 编写于 作者: R Ruilong Liu 提交者: GitHub

Merge pull request #387 from codeWorm2015/develop

#386 format files
......@@ -711,47 +711,6 @@ static inline size_t uint32_pack(uint32_t value, uint8_t *out) {
return rv;
}
/**
 * Pack a signed 32-bit integer and return the number of bytes written.
 * Negative numbers are encoded as two's complement 64-bit integers,
 * which always occupy the maximum of 10 varint bytes.
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out`.
 */
static inline size_t int32_pack(int32_t value, uint8_t *out) {
  if (value >= 0) {
    /* Non-negative values reuse the plain unsigned varint encoder. */
    return uint32_pack(value, out);
  }
  /* Negative values are sign-extended to 64 bits on the wire.  The
   * arithmetic right shifts below deliberately propagate the sign bit,
   * so the low 7 bits of each shifted value are exactly the bits the
   * varint decoder expects. */
  out[0] = value | 0x80;
  out[1] = (value >> 7) | 0x80;
  out[2] = (value >> 14) | 0x80;
  out[3] = (value >> 21) | 0x80;
  out[4] = (value >> 28) | 0x80;
  /* Bytes 5..8 carry the sign-extension (all one bits + continuation). */
  out[5] = out[6] = out[7] = out[8] = 0xff;
  out[9] = 0x01; /* final byte: top bits of the 64-bit value, no continuation */
  return 10;
}
/**
 * Pack a signed 32-bit integer using ZigZag encoding and return the number of
 * bytes written.
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out`.
 */
static inline size_t sint32_pack(int32_t value, uint8_t *out) {
  /* ZigZag-map the signed value onto unsigned space, then varint-encode. */
  const uint32_t zigzagged = zigzag32(value);
  return uint32_pack(zigzagged, out);
}
/**
* Pack a 64-bit unsigned integer using base-128 varint encoding and return the
* number of bytes written.
......@@ -789,116 +748,6 @@ static size_t uint64_pack(uint64_t value, uint8_t *out) {
return rv;
}
/**
 * Pack a 64-bit signed integer in ZigZag encoding and return the number of
 * bytes written.
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out`.
 */
static inline size_t sint64_pack(int64_t value, uint8_t *out) {
  /* ZigZag-map the signed value onto unsigned space, then varint-encode. */
  const uint64_t zigzagged = zigzag64(value);
  return uint64_pack(zigzagged, out);
}
/**
 * Pack a 32-bit quantity in little-endian byte order. Used for protobuf wire
 * types fixed32, sfixed32, float. Similar to "htole32".
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out` (always 4).
 */
static inline size_t fixed32_pack(uint32_t value, void *out) {
#if defined(WORDS_BIGENDIAN)
  /* Big-endian host: serialize byte by byte, least-significant first. */
  uint8_t *p = (uint8_t *)out;
  p[0] = (uint8_t)value;
  p[1] = (uint8_t)(value >> 8);
  p[2] = (uint8_t)(value >> 16);
  p[3] = (uint8_t)(value >> 24);
#else
  /* Little-endian host: memory layout already matches the wire format. */
  memcpy(out, &value, sizeof(value));
#endif
  return 4;
}
/**
 * Pack a 64-bit quantity in little-endian byte order. Used for protobuf wire
 * types fixed64, sfixed64, double. Similar to "htole64".
 *
 * \todo The big-endian impl is really only good for 32-bit machines, a 64-bit
 * version would be appreciated, plus a way to decide to use 64-bit math where
 * convenient.
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out` (always 8).
 */
static inline size_t fixed64_pack(uint64_t value, void *out) {
#if defined(WORDS_BIGENDIAN)
  /* Big-endian host: emit two little-endian 32-bit halves, low half first. */
  fixed32_pack(value, out);
  fixed32_pack(value >> 32, ((char *)out) + 4);
#else
  /* Little-endian host: memory layout already matches the wire format. */
  memcpy(out, &value, sizeof(value));
#endif
  return 8;
}
/**
 * Pack a boolean value as an integer and return the number of bytes written.
 *
 * \todo Perhaps on some platforms *out = !!value would be a better impl, b/c
 * that is idiomatic C++ in some STL implementations.
 *
 * \param value
 *      Value to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out` (always 1).
 */
static inline size_t boolean_pack(protobuf_c_boolean value, uint8_t *out) {
  /* Normalize any truthy input to the canonical TRUE/FALSE byte. */
  out[0] = value ? TRUE : FALSE;
  return 1;
}
/**
 * Pack a NUL-terminated C string and return the number of bytes written. The
 * output includes a length delimiter.
 *
 * The NULL pointer is treated as an empty string. This isn't really necessary,
 * but it allows people to leave required strings blank. (See Issue #13 in the
 * bug tracker for a little more explanation).
 *
 * \param str
 *      String to encode.
 * \param[out] out
 *      Packed value.
 * \return
 *      Number of bytes written to `out`.
 */
static inline size_t string_pack(const char *str, uint8_t *out) {
  if (str == NULL) {
    /* NULL behaves like "": a single zero-length delimiter byte. */
    out[0] = 0;
    return 1;
  }
  const size_t len = strlen(str);
  /* Varint-encoded length prefix, then the raw bytes (no NUL). */
  const size_t header = uint32_pack(len, out);
  memcpy(out + header, str, len);
  return header + len;
}
/**
* Pack a ProtobufCBinaryData and return the number of bytes written. The output
* includes a length delimiter.
......@@ -978,236 +827,6 @@ static inline size_t sizeof_elt_in_repeated_array(ProtobufCType type) {
return 0;
}
/**
 * Pack an array of 32-bit quantities in little-endian byte order.
 *
 * \param[out] out
 *      Destination.
 * \param[in] in
 *      Source.
 * \param[in] n
 *      Number of elements in the source array.
 */
static void copy_to_little_endian_32(void *out, const void *in,
                                     const unsigned n) {
#if defined(WORDS_BIGENDIAN)
  /* Big-endian host: byte-swap each element individually. */
  const uint32_t *src = (const uint32_t *)in;
  unsigned i;
  for (i = 0; i < n; i++) {
    fixed32_pack(src[i], (uint32_t *)out + i);
  }
#else
  /* Little-endian host: memory layout already matches, bulk copy. */
  memcpy(out, in, n * 4);
#endif
}
/**
 * Pack an array of 64-bit quantities in little-endian byte order.
 *
 * \param[out] out
 *      Destination.
 * \param[in] in
 *      Source.
 * \param[in] n
 *      Number of elements in the source array.
 */
static void copy_to_little_endian_64(void *out, const void *in,
                                     const unsigned n) {
#if defined(WORDS_BIGENDIAN)
  /* Big-endian host: byte-swap each element individually. */
  const uint64_t *src = (const uint64_t *)in;
  unsigned i;
  for (i = 0; i < n; i++) {
    fixed64_pack(src[i], (uint64_t *)out + i);
  }
#else
  /* Little-endian host: memory layout already matches, bulk copy. */
  memcpy(out, in, n * 8);
#endif
}
/**
 * Get the minimum number of bytes required to pack a field value of a
 * particular type.
 *
 * \param type
 *      Field type.
 * \return
 *      Number of bytes.
 */
static unsigned get_type_min_size(ProtobufCType type) {
  switch (type) {
    /* 32-bit fixed-width wire types. */
    case PROTOBUF_C_TYPE_SFIXED32:
    case PROTOBUF_C_TYPE_FIXED32:
    case PROTOBUF_C_TYPE_FLOAT:
      return 4;
    /* 64-bit fixed-width wire types. */
    case PROTOBUF_C_TYPE_SFIXED64:
    case PROTOBUF_C_TYPE_FIXED64:
    case PROTOBUF_C_TYPE_DOUBLE:
      return 8;
    /* Everything else (varints, length-delimited) needs at least 1 byte. */
    default:
      return 1;
  }
}
/**
 * Get the packed size of an array of same field type.
 *
 * \param field
 *      Field descriptor.
 * \param count
 *      Number of elements of this type.
 * \param array
 *      The elements to get the size of.
 * \return
 *      Number of bytes required.
 */
static size_t get_packed_payload_length(const ProtobufCFieldDescriptor *field,
                                        unsigned count, const void *array) {
  unsigned rv = 0;
  unsigned i;
  switch (field->type) {
    /* Fixed-width 32-bit wire types: always 4 bytes per element. */
    case PROTOBUF_C_TYPE_SFIXED32:
    case PROTOBUF_C_TYPE_FIXED32:
    case PROTOBUF_C_TYPE_FLOAT:
      return count * 4;
    /* Fixed-width 64-bit wire types: always 8 bytes per element. */
    case PROTOBUF_C_TYPE_SFIXED64:
    case PROTOBUF_C_TYPE_FIXED64:
    case PROTOBUF_C_TYPE_DOUBLE:
      return count * 8;
    /* Varint-encoded types: the size of each element depends on its value,
     * so sum the per-element sizes. */
    case PROTOBUF_C_TYPE_ENUM:
    case PROTOBUF_C_TYPE_INT32: {
      const int32_t *arr = (const int32_t *)array;
      for (i = 0; i < count; i++) rv += int32_size(arr[i]);
      break;
    }
    case PROTOBUF_C_TYPE_SINT32: {
      const int32_t *arr = (const int32_t *)array;
      for (i = 0; i < count; i++) rv += sint32_size(arr[i]);
      break;
    }
    case PROTOBUF_C_TYPE_UINT32: {
      const uint32_t *arr = (const uint32_t *)array;
      for (i = 0; i < count; i++) rv += uint32_size(arr[i]);
      break;
    }
    case PROTOBUF_C_TYPE_SINT64: {
      const int64_t *arr = (const int64_t *)array;
      for (i = 0; i < count; i++) rv += sint64_size(arr[i]);
      break;
    }
    case PROTOBUF_C_TYPE_INT64:
    case PROTOBUF_C_TYPE_UINT64: {
      const uint64_t *arr = (const uint64_t *)array;
      for (i = 0; i < count; i++) rv += uint64_size(arr[i]);
      break;
    }
    /* Booleans always pack to exactly one byte each. */
    case PROTOBUF_C_TYPE_BOOL:
      return count;
    default:
      PROTOBUF_C__ASSERT_NOT_REACHED();
  }
  return rv;
}
/**
 * Pack an array of same field type to a virtual buffer.
 *
 * \param field
 *      Field descriptor.
 * \param count
 *      Number of elements of this type.
 * \param array
 *      The elements to get the size of.
 * \param[out] buffer
 *      Virtual buffer to append data to.
 * \return
 *      Number of bytes packed.
 */
static size_t pack_buffer_packed_payload(const ProtobufCFieldDescriptor *field,
                                         unsigned count, const void *array,
                                         ProtobufCBuffer *buffer) {
  uint8_t scratch[16]; /* staging area for one packed element at a time */
  size_t rv = 0;
  unsigned i;
  switch (field->type) {
    case PROTOBUF_C_TYPE_SFIXED32:
    case PROTOBUF_C_TYPE_FIXED32:
    case PROTOBUF_C_TYPE_FLOAT:
#if !defined(WORDS_BIGENDIAN)
      /* Little-endian host: the in-memory layout of fixed-width elements
       * already matches the wire format, so append the raw bytes in one
       * call (see no_packing_needed below). */
      rv = count * 4;
      goto no_packing_needed;
#else
      /* Big-endian host: byte-swap each element through scratch. */
      for (i = 0; i < count; i++) {
        unsigned len = fixed32_pack(((uint32_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
#endif
    case PROTOBUF_C_TYPE_SFIXED64:
    case PROTOBUF_C_TYPE_FIXED64:
    case PROTOBUF_C_TYPE_DOUBLE:
#if !defined(WORDS_BIGENDIAN)
      rv = count * 8;
      goto no_packing_needed;
#else
      for (i = 0; i < count; i++) {
        unsigned len = fixed64_pack(((uint64_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
#endif
    /* Varint families: each element is encoded into scratch, then appended. */
    case PROTOBUF_C_TYPE_ENUM:
    case PROTOBUF_C_TYPE_INT32:
      for (i = 0; i < count; i++) {
        unsigned len = int32_pack(((int32_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
    case PROTOBUF_C_TYPE_SINT32:
      for (i = 0; i < count; i++) {
        unsigned len = sint32_pack(((int32_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
    case PROTOBUF_C_TYPE_UINT32:
      for (i = 0; i < count; i++) {
        unsigned len = uint32_pack(((uint32_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
    case PROTOBUF_C_TYPE_SINT64:
      for (i = 0; i < count; i++) {
        unsigned len = sint64_pack(((int64_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
    case PROTOBUF_C_TYPE_INT64:
    case PROTOBUF_C_TYPE_UINT64:
      for (i = 0; i < count; i++) {
        unsigned len = uint64_pack(((uint64_t *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      break;
    case PROTOBUF_C_TYPE_BOOL:
      for (i = 0; i < count; i++) {
        unsigned len = boolean_pack(((protobuf_c_boolean *)array)[i], scratch);
        buffer->append(buffer, len, scratch);
        rv += len;
      }
      /* Each boolean packs to exactly 1 byte, so count == rv here. */
      return count;
    default:
      PROTOBUF_C__ASSERT_NOT_REACHED();
  }
  return rv;
#if !defined(WORDS_BIGENDIAN)
no_packing_needed:
  /* Fixed-width data on a little-endian host: append the source array
   * verbatim (rv was set to the byte count before the goto). */
  buffer->append(buffer, rv, array);
  return rv;
#endif
}
static inline int int_range_lookup(unsigned n_ranges,
const ProtobufCIntRange *ranges, int value) {
unsigned n;
......
......@@ -56,10 +56,5 @@ inline std::string DataLayoutToString(const DataLayout &data_layout) {
}
}
inline std::ostream &operator<<(std::ostream &out, const DataLayout &l) {
  // Stream the human-readable name of the layout and return the stream
  // so insertions can be chained.
  return out << DataLayoutToString(l);
}
} // namespace framework
} // namespace paddle_mobile
......@@ -17,13 +17,7 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const {
std::vector<std::shared_ptr<VarDesc>> res;
for (const auto &p : vars_) {
res.push_back(p.second);
}
return res;
}
std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const { return vars_; }
std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const { return ops_; }
......@@ -31,10 +25,14 @@ BlockDesc::BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc)
: index_(desc->idx), parent_index_(desc->idx) {
for (int i = 0; i < desc->n_vars; ++i) {
PaddleMobile__Framework__Proto__VarDesc *var_desc = desc->vars[i];
vars_[std::string(var_desc->name)] =
std::shared_ptr<VarDesc>(new VarDesc(var_desc));
vars_.emplace_back(std::shared_ptr<VarDesc>(new VarDesc(var_desc)));
}
std::sort(vars_.begin(), vars_.end(),
[](std::shared_ptr<VarDesc> left, std::shared_ptr<VarDesc> right) {
return left->Name() < right->Name();
});
for (int j = 0; j < desc->n_ops; ++j) {
PaddleMobile__Framework__Proto__OpDesc *op_desc = desc->ops[j];
ops_.emplace_back(new framework::OpDesc(op_desc));
......
......@@ -34,10 +34,9 @@ class BlockDesc {
ops_.push_back(copy_op_desc);
}
for (auto &var_desc : block_desc.vars_) {
std::shared_ptr<VarDesc> copy_var_desc =
std::make_shared<VarDesc>(*var_desc.second);
vars_[var_desc.first] = copy_var_desc;
for (int i = 0; i < block_desc.vars_.size(); ++i) {
auto &var_desc = block_desc.vars_[i];
vars_.emplace_back(std::make_shared<VarDesc>(*var_desc));
}
}
......@@ -63,7 +62,7 @@ class BlockDesc {
bool multi_thread_;
int parent_index_;
std::vector<std::shared_ptr<OpDesc>> ops_;
std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
std::vector<std::shared_ptr<VarDesc>> vars_;
};
} // namespace framework
......
......@@ -43,13 +43,13 @@ bool Node::operator==(const Node &in) {
return true;
}
std::vector<std::shared_ptr<framework::OpDesc>> Node::OpDescs(uint size) {
std::vector<std::shared_ptr<framework::OpDesc>> Node::OpDescs(int size) {
std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
OpDescs(size - 1, &op_descs);
return op_descs;
}
void Node::OpDescs(uint index,
void Node::OpDescs(int index,
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc) {
if (index == 0) {
return;
......@@ -80,18 +80,18 @@ void Node::To(int index, std::shared_ptr<Node> node) {
}
}
uint Node::Depth(uint begin) {
uint depth = 0;
int Node::Depth(int begin) {
int depth = 0;
begin++;
for (int i = 0; i < outputs_.size(); ++i) {
uint output_depth = outputs_[i]->Depth(begin);
int output_depth = outputs_[i]->Depth(begin);
depth = output_depth > depth ? output_depth : depth;
}
return begin > depth ? begin : depth;
}
Node &Node::Folder(
uint size, std::string type,
int size, std::string type,
std::map<std::string, std::pair<std::string, std::string>> change,
std::vector<std::shared_ptr<Node>> *removed_nodes) {
std::shared_ptr<framework::OpDesc> op_desc =
......@@ -108,7 +108,7 @@ Node &Node::Folder(
void Node::Folder(
std::shared_ptr<framework::OpDesc> op_desc,
std::vector<std::shared_ptr<Node>> *outputs, uint index,
std::vector<std::shared_ptr<Node>> *outputs, int index,
std::map<std::string, std::pair<std::string, std::string>> *change,
Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes) {
if (change->find(this->type_) != change->end()) {
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
#include <cinttypes>
#include <map>
#include <string>
#include <vector>
......@@ -39,22 +40,22 @@ class Node {
void Description();
#endif
std::shared_ptr<Node> To(int size);
uint Depth(uint begin = 0);
int Depth(int begin = 0);
Node &Folder(
uint size, std::string type,
int size, std::string type,
std::map<std::string, std::pair<std::string, std::string>> change_map,
std::vector<std::shared_ptr<Node>> *removed_nodes);
std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(uint size);
std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(int size);
std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
std::string Type() { return type_; }
private:
void OpDescs(uint size,
void OpDescs(int size,
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc);
void To(int index, std::shared_ptr<Node>);
void Folder(
std::shared_ptr<framework::OpDesc> op_desc,
std::vector<std::shared_ptr<Node>> *outputs, uint index,
std::vector<std::shared_ptr<Node>> *outputs, int index,
std::map<std::string, std::pair<std::string, std::string>> *change,
Node *begin_node, std::vector<std::shared_ptr<Node>> *removed_nodes);
std::shared_ptr<framework::OpDesc> op_desc_;
......
......@@ -28,6 +28,8 @@ class Program {
std::shared_ptr<ProgramDesc> optimizeProgram;
std::shared_ptr<Scope> scope;
std::string model_path;
std::string para_path;
bool is_commbine = false;
private:
};
......
......@@ -22,6 +22,7 @@ limitations under the License. */
#include <vector>
#include "common/enforce.h"
#include "common/enforce.h"
#include "framework/data_layout.h"
#include "framework/ddim.h"
#include "memory/t_malloc.h"
......
......@@ -158,7 +158,25 @@ void Loader<Dtype, P>::LoadVar(framework::Variable *variable,
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
    const std::string &dirname, bool optimize) {
  // Directory layout: a "__model__" file plus per-variable parameter files
  // inside `dirname`.  Parse the model and remember the directory so the
  // executor can locate the parameter files later.
  // (Removed dead local `model_filename` left over from the LoadProgram
  // refactor — the path is now built inline in the call below.)
  auto program = this->LoadProgram(dirname + "/__model__", optimize);
  program.model_path = dirname;
  return program;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
    const std::string &model_path, const std::string &para_path,
    bool optimize) {
  // "Combined" layout: one model file plus a single packed parameter file,
  // instead of one file per variable.
  auto program = this->LoadProgram(model_path, optimize);
  program.is_commbine = true;  // flag tells the executor to use combined init
  program.para_path = para_path;
  return program;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
const std::string &model_path, bool optimize) {
std::string model_filename = model_path;
PaddleMobile__Framework__Proto__ProgramDesc *c_program;
uint8_t *buf = NULL;
size_t read_size = ReadBuffer(model_filename.c_str(), &buf);
......@@ -175,7 +193,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
framework::Program<Dtype, P> program;
program.model_path = dirname;
program.originProgram = originProgramDesc;
auto scope = std::make_shared<framework::Scope>();
......@@ -253,16 +270,17 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
ops_of_block_[*block_desc.get()].push_back(op_base);
}
}
InitMemory();
if (program_.is_commbine) {
InitCombineMemory();
} else {
InitMemory();
}
}
template <typename Dtype, Precision P>
void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor,
const std::string &file_path) {
char *origin_data = Get_binary_data(file_path);
char *data = origin_data;
const std::string &file_path, char *data) {
// 1. version
uint32_t version = *(uint32_t *)data;
data += sizeof(uint32_t);
......@@ -348,8 +366,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
for (int n = 0; n < memory_size * type_size; ++n) {
static_cast<char *>(memory)[n] = data[n];
}
delete origin_data;
data += (sizeof(char) * memory_size * type_size);
}
template <typename Dtype, Precision P>
......@@ -362,8 +379,12 @@ void Executor<Dtype, P>::InitMemory() {
if (var_desc->Name() == "feed" || var_desc->Name() == "fetch") {
continue;
}
char *origin_data =
Get_binary_data(program_.model_path + "/" + var_desc->Name());
LoadMemory(*var_desc, tensor,
program_.model_path + "/" + var_desc->Name());
program_.model_path + "/" + var_desc->Name(), origin_data);
delete origin_data;
} else {
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
auto tensor = var->template GetMutable<framework::LoDTensor>();
......@@ -375,6 +396,33 @@ void Executor<Dtype, P>::InitMemory() {
}
}
template <typename Dtype, Precision P>
// Initialize all persistable tensors from the single combined parameter file
// (program_.para_path); non-persistable LoD tensors just get their storage
// allocated.
void Executor<Dtype, P>::InitCombineMemory() {
  char *origin_data = Get_binary_data(program_.para_path);
  for (const auto &block : to_predict_program_->Blocks()) {
    for (const auto &var_desc : block->Vars()) {
      auto var = program_.scope->Var(var_desc->Name());
      if (var_desc->Persistable()) {
        auto tensor = var->template GetMutable<framework::LoDTensor>();
        // feed/fetch are runtime I/O variables; they carry no stored weights.
        if (var_desc->Name() == "feed" || var_desc->Name() == "fetch") {
          continue;
        }
        // NOTE(review): LoadMemory takes `char *data` by value and advances
        // only its local copy, so every iteration here re-reads from the
        // START of origin_data instead of consuming the combined buffer
        // sequentially.  To read variables back-to-back the read cursor must
        // be shared (e.g. pass `char **`) — verify against LoadMemory.
        // Also: the file_path argument is the per-variable path even though
        // the bytes come from para_path — presumably unused in combine mode;
        // confirm.
        LoadMemory(*var_desc, tensor,
                   program_.model_path + "/" + var_desc->Name(), origin_data);
      } else {
        if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
          auto tensor = var->template GetMutable<framework::LoDTensor>();
          // Reserve storage for runtime-computed tensors.
          tensor->template mutable_data<Ptype>();
        }
      }
    }
  }
  // NOTE(review): if Get_binary_data allocates with new[], this must be
  // `delete[] origin_data;` — plain delete on a new[] buffer is UB. Confirm.
  delete origin_data;
}
template <typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const framework::Tensor &t) {
......
......@@ -33,7 +33,13 @@ class Loader {
const framework::Program<Dtype, P> Load(const std::string &dirname,
bool optimize = false);
const framework::Program<Dtype, P> Load(const std::string &model_path,
const std::string &para_path,
bool optimize = false);
private:
const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
bool optimize = false);
void LoadVar(framework::Variable *variable,
const framework::VarDesc &var_desc,
const std::string &file_path);
......@@ -57,7 +63,9 @@ class Executor {
void InitMemory();
void LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, const std::string &file_path);
framework::LoDTensor *tensor, const std::string &file_path,
char *data);
void InitCombineMemory();
framework::Program<Dtype> program_;
int batch_size_ = 1;
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
......
......@@ -20,6 +20,9 @@ int main() {
// ../../../test/models/googlenet
// ../../../test/models/mobilenet
auto program = loader.Load(g_resnet, true);
loader.Load(g_googlenet_combine + "/model", g_googlenet_combine + "/params",
true);
program.originProgram->Description("program desc: ");
return 0;
}
......@@ -18,9 +18,12 @@ limitations under the License. */
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
bool optimize = true;
bool optimize = false;
auto time1 = time();
auto program = loader.Load(g_googlenet, optimize);
// auto program = loader.Load(g_googlenet_combine + "/model",
// g_googlenet_combine + "/params", optimize);
auto time2 = time();
DLOG << "load cost :" << time_diff(time1, time2) << "ms\n";
paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, optimize);
......
......@@ -28,6 +28,7 @@ static const std::string g_googlenet = "../models/googlenet";
static const std::string g_mobilenet = "../models/mobilenet";
static const std::string g_resnet_50 = "../models/resnet_50";
static const std::string g_resnet = "../models/resnet";
static const std::string g_googlenet_combine = "../models/googlenet_combine";
static const std::string g_yolo = "../models/yolo";
static const std::string g_test_image_1x3x224x224 =
"../images/test_image_1x3x224x224_float";
......
......@@ -70,6 +70,7 @@ build_for_android() {
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DCMAKE_LDFLAGS="-Wl,--gc-sections --icf=safe" \
-DANDROID_STL=c++_static \
-DANDROID=true \
-D"${NET}=true" \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册