Commit 64cfa727 authored by hjchen2

Refine Java API, add write_to_array and read_from_array ops

Parent 195eb11c
@@ -85,18 +85,18 @@ struct Print {
  private:
   void print(LogLevel level) {
-    buffer_ << std::endl;
+    // buffer_ << std::endl;
     if (level == kLOG_ERROR) {
-      std::cerr << buffer_.str();
+      std::cerr << buffer_.str() << std::endl;
     } else {
-      std::cout << buffer_.str();
+      std::cout << buffer_.str() << std::endl;
     }
   }
   std::ostringstream buffer_;
 };

 struct ToLog {
-  ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "")
+  explicit ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "")
       : level_(level) {
     unsigned blanks =
         (unsigned)(level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
...
@@ -81,6 +81,8 @@ const char *G_OP_TYPE_LOGICAL_AND = "logical_and";
 const char *G_OP_TYPE_LOGICAL_OR = "logical_or";
 const char *G_OP_TYPE_LOGICAL_NOT = "logical_not";
 const char *G_OP_TYPE_LOGICAL_XOR = "logical_xor";
+const char *G_OP_TYPE_WRITE_TO_ARRAY = "write_to_array";
+const char *G_OP_TYPE_READ_FROM_ARRAY = "read_from_array";
 const char *G_OP_TYPE_QUANTIZE = "quantize";
 const char *G_OP_TYPE_DEQUANTIZE = "dequantize";
@@ -189,5 +191,7 @@ std::unordered_map<
     {G_OP_TYPE_LOGICAL_AND, {{"X", "Y"}, {"Out"}}},
     {G_OP_TYPE_LOGICAL_OR, {{"X", "Y"}, {"Out"}}},
     {G_OP_TYPE_LOGICAL_XOR, {{"X", "Y"}, {"Out"}}},
-    {G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}}};
+    {G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}},
+    {G_OP_TYPE_WRITE_TO_ARRAY, {{"X", "I"}, {"Out"}}},
+    {G_OP_TYPE_READ_FROM_ARRAY, {{"X", "I"}, {"Out"}}}};
 }  // namespace paddle_mobile
@@ -170,6 +170,8 @@ extern const char *G_OP_TYPE_LOGICAL_AND;
 extern const char *G_OP_TYPE_LOGICAL_OR;
 extern const char *G_OP_TYPE_LOGICAL_NOT;
 extern const char *G_OP_TYPE_LOGICAL_XOR;
+extern const char *G_OP_TYPE_WRITE_TO_ARRAY;
+extern const char *G_OP_TYPE_READ_FROM_ARRAY;
 extern const char *G_OP_TYPE_QUANTIZE;
 extern const char *G_OP_TYPE_DEQUANTIZE;
...
@@ -90,6 +90,10 @@ class Attribute {
         attr.Set<int64_t>(attr_desc->l);
         break;
       }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK: {
+        attr.Set<int>(attr_desc->block_idx);
+        break;
+      }
       default:
         PADDLE_MOBILE_THROW_EXCEPTION("attr type not support");
     }
...
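The new BLOCK branch stores a block attribute as its integer block index, which WhileParam later reads back with GetAttr<int>("sub_block") (see while_kernel.h below). A minimal standalone sketch of that round trip, with std::map standing in for the real AttributeMap — nothing here is the actual paddle_mobile API:

// Minimal sketch of the block-attribute round trip: the block is stored
// as its integer index and fetched back by name. std::map is a stand-in
// for paddle_mobile's AttributeMap.
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> attrs;       // stand-in for AttributeMap
  int block_idx = 1;                      // as read from attr_desc->block_idx
  attrs["sub_block"] = block_idx;         // what the BLOCK case effectively does
  int sub_block = attrs.at("sub_block");  // what WhileParam's GetAttr<int> does
  std::cout << "sub_block = " << sub_block << std::endl;
  return 0;
}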
@@ -65,6 +65,7 @@ Executor<Device, T>::Executor(const Program<Device> &program,
     for (int j = 0; j < ops.size(); ++j) {
       std::shared_ptr<OpDesc> op_desc = ops[j];
       DLOG << "create op: " << op_desc->Type();
+
       auto op_handler = OpRegistry<Device>::CreateOp(
           op_desc->Type(), op_desc->GetInputs(), op_desc->GetOutputs(),
           op_desc->GetAttrMap(), program_.scope);
...
@@ -297,3 +297,12 @@ LOAD_OP1(logical_not, CPU);
 #ifdef LOGICAL_XOR_OP
 LOAD_OP1(logical_xor, CPU);
 #endif
+#ifdef WHILE_OP
+LOAD_OP1(while, CPU);
+#endif
+#ifdef WRITE_TO_ARRAY_OP
+LOAD_OP1(write_to_array, CPU);
+#endif
+#ifdef READ_FROM_ARRAY_OP
+LOAD_OP1(read_from_array, CPU);
+#endif
@@ -176,6 +176,8 @@ LoDTensor LodExpand(const LoDTensor &source, const LoD &lod, size_t level) {
   return tensor;
 }

+using LoDTensorArray = std::vector<LoDTensor>;
+
 // Get the absolute offset of a lod[start_level][start_idx:end_idx] and
 // relative length of details for every levels(i.e., [start_level: ]).
 //
...
@@ -41,10 +41,8 @@ OpDesc::OpDesc(PaddleMobile__Framework__Proto__OpDesc *desc) {
   for (int k = 0; k < desc->n_attrs; ++k) {
     PaddleMobile__Framework__Proto__OpDesc__Attr *attr = desc->attrs[k];
     std::string attr_name(attr->name);
-    if (attr->type != PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK) {
     attrs_[attr_name] = Attribute::GetAttrValue(attr);
-    }
   }
 }

 const std::vector<std::string> &OpDesc::Input(const std::string &name) const {
...
@@ -90,7 +90,8 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadQualified(
 #ifdef ENABLE_EXCEPTION
   try {
     isLoadOk = getPaddleMobileInstance()->Load(
-        jstring2cppstring(env, modelPath), optimize, qualified);
+        jstring2cppstring(env, modelPath), optimize, qualified, 1,
+        static_cast<bool>(lodMode));
   } catch (paddle_mobile::PaddleMobileException &e) {
     ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
     isLoadOk = false;
@@ -116,7 +117,7 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
   try {
     isLoadOk = getPaddleMobileInstance()->Load(
         jstring2cppstring(env, modelPath), jstring2cppstring(env, paramPath),
-        optimize);
+        optimize, false, 1, static_cast<bool>(lodMode));
   } catch (paddle_mobile::PaddleMobileException &e) {
     ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
     isLoadOk = false;
@@ -142,7 +143,7 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombinedQualified(
   try {
     isLoadOk = getPaddleMobileInstance()->Load(
         jstring2cppstring(env, modelPath), jstring2cppstring(env, paramPath),
-        optimize, qualified);
+        optimize, qualified, 1, static_cast<bool>(lodMode));
   } catch (paddle_mobile::PaddleMobileException &e) {
     ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
     isLoadOk = false;
...
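These three call sites now thread a batch size of 1 and the new lodMode flag through to the native loader. A hedged sketch of the Load overloads the calls imply; parameter names, defaults, and the bool return are assumptions, not copied from the real paddle_mobile header:

// Hedged reconstruction of the Load overloads implied by the JNI call
// sites above; everything here is a stand-in, not the real class.
#include <iostream>
#include <string>

struct PaddleMobileSketch {
  // Separate model file (loadQualified path).
  bool Load(const std::string &model_path, bool optimize = false,
            bool quantification = false, int batch_size = 1,
            bool lod_mode = false) {
    std::cout << "load " << model_path << ", lod_mode=" << lod_mode << "\n";
    return true;  // stub: the real loader parses the model here
  }
  // Combined model + params files (loadCombined path).
  bool Load(const std::string &model_path, const std::string &param_path,
            bool optimize = false, bool quantification = false,
            int batch_size = 1, bool lod_mode = false) {
    std::cout << "load " << model_path << " + " << param_path << "\n";
    return true;  // stub
  }
};

int main() {
  PaddleMobileSketch pm;
  // Mirrors the loadCombined call above: optimize on, no quantification,
  // batch size 1, lod_mode on.
  pm.Load("model", "params", true, false, 1, true);
  return 0;
}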
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/controlflow/tensor_array_read_write_op.h"
namespace paddle_mobile {
namespace operators {
#ifdef WRITE_TO_ARRAY_OP
template <typename Dtype, typename T>
void WriteToArrayOp<Dtype, T>::InferShape() const {}
#endif // WRITE_TO_ARRAY_OP
#ifdef READ_FROM_ARRAY_OP
template <typename Dtype, typename T>
void ReadFromArrayOp<Dtype, T>::InferShape() const {}
#endif // READ_FROM_ARRAY_OP
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
#ifdef WRITE_TO_ARRAY_OP
REGISTER_OPERATOR_CPU(write_to_array, ops::WriteToArrayOp);
#endif // WRITE_TO_ARRAY_OP
#ifdef READ_FROM_ARRAY_OP
REGISTER_OPERATOR_CPU(read_from_array, ops::ReadFromArrayOp);
#endif // READ_FROM_ARRAY_OP
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/tensor_array_read_write_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef WRITE_TO_ARRAY_OP
DECLARE_OPERATOR(WriteToArray, WriteToArrayParam, WriteToArrayKernel);
#endif // WRITE_TO_ARRAY_OP
#ifdef READ_FROM_ARRAY_OP
DECLARE_OPERATOR(ReadFromArray, ReadFromArrayParam, ReadFromArrayKernel);
#endif  // READ_FROM_ARRAY_OP
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/controlflow/while_op.h"
namespace paddle_mobile {
namespace operators {
#ifdef WHILE_OP
template <typename Dtype, typename T>
void WhileOp<Dtype, T>::InferShape() const {
// TODO(hjchen2)
}
#endif // WHILE_OP
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
#ifdef WHILE_OP
REGISTER_OPERATOR_CPU(while, ops::WhileOp);
#endif // WHILE_OP
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/while_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef WHILE_OP
DECLARE_OPERATOR(While, WhileParam, WhileKernel);
#endif // WHILE_OP
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/kernel/tensor_array_read_write_kernel.h"
namespace paddle_mobile {
namespace operators {
#ifdef WRITE_TO_ARRAY_OP
template <>
bool WriteToArrayKernel<CPU, float>::Init(WriteToArrayParam<CPU> *param) {
return true;
}
template <>
void WriteToArrayKernel<CPU, float>::Compute(
const WriteToArrayParam<CPU> &param) {
int64_t offset = param.index_->data<int64_t>()[0];
if (offset >= param.output_->size()) {
// Grow to offset + 1 so that index `offset` itself is addressable;
// resizing to `offset` would leave at(offset) out of range.
param.output_->resize(offset + 1);
}
framework::LoDTensor *out_tensor = &(param.output_->at(offset));
out_tensor->set_lod(param.input_->lod());
if (param.input_->memory_size() > 0) {
TensorCopy(*(param.input_), out_tensor);
}
}
#endif // WRITE_TO_ARRAY_OP
#ifdef READ_FROM_ARRAY_OP
template <>
bool ReadFromArrayKernel<CPU, float>::Init(ReadFromArrayParam<CPU> *param) {
return true;
}
template <>
void ReadFromArrayKernel<CPU, float>::Compute(
const ReadFromArrayParam<CPU> &param) {
int64_t offset = param.index_->data<int64_t>()[0];
if (offset < param.input_->size()) {
TensorCopy(param.input_->at(offset), param.output_);
}
}
#endif // READ_FROM_ARRAY_OP
} // namespace operators
} // namespace paddle_mobile
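Taken together, the two kernels implement simple array-of-tensors indexing: write grows the array as needed and copies X into slot I; read copies slot I back out if it exists. A self-contained sketch of those semantics, with std::vector<float> standing in for framework::LoDTensor (helper names here are illustrative, not part of paddle_mobile):

// Standalone sketch of the tensor-array semantics above. Tensor and
// TensorArray are stand-ins; WriteToArray/ReadFromArray mirror the two
// Compute() bodies, not the real paddle_mobile API.
#include <cstdint>
#include <iostream>
#include <vector>

using Tensor = std::vector<float>;
using TensorArray = std::vector<Tensor>;

// Mirrors WriteToArrayKernel: grow the array so index `offset` is valid,
// then copy the input into that slot.
void WriteToArray(const Tensor &input, int64_t offset, TensorArray *array) {
  if (offset >= static_cast<int64_t>(array->size())) {
    array->resize(offset + 1);
  }
  (*array)[offset] = input;
}

// Mirrors ReadFromArrayKernel: copy the slot out only if it exists.
void ReadFromArray(const TensorArray &array, int64_t offset, Tensor *output) {
  if (offset < static_cast<int64_t>(array.size())) {
    *output = array[offset];
  }
}

int main() {
  TensorArray array;
  WriteToArray({1.f, 2.f}, 3, &array);  // array grows to size 4
  Tensor out;
  ReadFromArray(array, 3, &out);
  std::cout << "read back " << out.size() << " elements" << std::endl;  // 2
  return 0;
}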
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/kernel/while_kernel.h"
namespace paddle_mobile {
namespace operators {
#ifdef WHILE_OP
template <>
bool WhileKernel<CPU, float>::Init(WhileParam<CPU> *param) {
return true;
}
template <>
void WhileKernel<CPU, float>::Compute(const WhileParam<CPU> &param) {
// TODO(hjchen2)
}
#endif // WHILE_OP
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef WRITE_TO_ARRAY_OP
DECLARE_KERNEL(WriteToArray, WriteToArrayParam);
#endif // WRITE_TO_ARRAY_OP
#ifdef READ_FROM_ARRAY_OP
DECLARE_KERNEL(ReadFromArray, ReadFromArrayParam);
#endif // READ_FROM_ARRAY_OP
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef WHILE_OP
template <typename Dtype>
class WhileParam : public OpParam {
public:
WhileParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope)
: inputs_(inputs), outputs_(outputs), scope_(scope) {
cond_ =
OpParam::GetVarValue<framework::LoDTensor>("Condition", inputs, scope);
sub_block_ = OpParam::GetAttr<int>("sub_block", attrs);
}
public:
framework::LoDTensor *cond_;
int sub_block_;
const VariableNameMap inputs_;
const VariableNameMap outputs_;
const Scope scope_;
};
DECLARE_KERNEL(While, WhileParam);
#endif // WHILE_OP
} // namespace operators
} // namespace paddle_mobile
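WhileKernel::Compute above is still a stub (TODO(hjchen2)). As a rough sketch of the control flow such a kernel would eventually implement — not the author's implementation — the op keeps executing the operators of block sub_block_ while the Condition tensor stays true; run_block below is a hypothetical callback standing in for the sub-block executor:

// Rough control-flow sketch only; Compute() is unimplemented above.
// `run_block` is a hypothetical stand-in for executing the ops of
// block `sub_block` against the enclosing scope.
#include <functional>
#include <iostream>

void WhileLoopSketch(bool *cond, int sub_block,
                     const std::function<void(int)> &run_block) {
  // The sub-block is expected to update *cond each iteration, mirroring
  // how the real op would re-read the "Condition" tensor.
  while (*cond) {
    run_block(sub_block);
  }
}

int main() {
  bool cond = true;
  int iters = 0;
  WhileLoopSketch(&cond, 1, [&](int block) {
    std::cout << "ran block " << block << std::endl;
    if (++iters == 3) cond = false;  // stand-in for the block's computation
  });
  return 0;
}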
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <cstring>
 #include <string>
 #include "common/log.h"
 #include "memory/t_malloc.h"
...
@@ -2991,5 +2991,63 @@ class LogicalUnaryParam : public OpParam {
 };
 #endif  // LOGICAL_NOT_OP

+// #ifdef WHILE_OP
+// template <typename Dtype>
+// class WhileParam : public OpParam {
+//  public:
+//   WhileParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+//              const AttributeMap &attrs, const Scope &scope) {
+//     cond_ = OpParam::GetVarValue<framework::LoDTensor>("Condition", inputs,
+//                                                        scope);
+//     block_desc_ =
+//         OpParam::GetAttr<framework::BlockDesc *>("sub_block", attrs);
+//   }
+//
+//  public:
+//   framework::LoDTensor *cond_;
+//   const framework::BlockDesc *block_desc_;
+// };
+// #endif  // WHILE_OP
+
+#ifdef WRITE_TO_ARRAY_OP
+template <typename Dtype>
+class WriteToArrayParam : public OpParam {
+ public:
+  WriteToArrayParam(const VariableNameMap &inputs,
+                    const VariableNameMap &outputs, const AttributeMap &attrs,
+                    const Scope &scope) {
+    input_ = OpParam::GetVarValue<framework::LoDTensor>("X", inputs, scope);
+    index_ = OpParam::GetVarValue<framework::LoDTensor>("I", inputs, scope);
+    output_ =
+        OpParam::GetVarValue<framework::LoDTensorArray>("Out", outputs, scope);
+  }
+
+ public:
+  framework::LoDTensor *input_;
+  framework::LoDTensor *index_;
+  framework::LoDTensorArray *output_;
+};
+#endif  // WRITE_TO_ARRAY_OP
+
+#ifdef READ_FROM_ARRAY_OP
+template <typename Dtype>
+class ReadFromArrayParam : public OpParam {
+ public:
+  ReadFromArrayParam(const VariableNameMap &inputs,
+                     const VariableNameMap &outputs, const AttributeMap &attrs,
+                     const Scope &scope) {
+    input_ =
+        OpParam::GetVarValue<framework::LoDTensorArray>("X", inputs, scope);
+    index_ = OpParam::GetVarValue<framework::LoDTensor>("I", inputs, scope);
+    output_ = OpParam::GetVarValue<framework::LoDTensor>("Out", outputs, scope);
+  }
+
+ public:
+  framework::LoDTensorArray *input_;
+  framework::LoDTensor *index_;
+  framework::LoDTensor *output_;
+};
+#endif  // READ_FROM_ARRAY_OP
+
 }  // namespace operators
 }  // namespace paddle_mobile
File mode changed from 100755 to 100644
@@ -285,6 +285,9 @@ if(NOT FOUND_MATCH)
     set(LOGICAL_OR_OP ON)
     set(LOGICAL_NOT_OP ON)
     set(LOGICAL_XOR_OP ON)
+    set(WHILE_OP ON)
+    set(WRITE_TO_ARRAY_OP ON)
+    set(READ_FROM_ARRAY_OP ON)
 endif()

 # option(BATCHNORM_OP "" ON)
@@ -559,3 +562,13 @@ endif()
 if (FUSION_DECONVADDRELU_OP)
     add_definitions(-DFUSION_DECONVADDRELU_OP)
 endif()
+
+if (WHILE_OP)
+    add_definitions(-DWHILE_OP)
+endif()
+if (WRITE_TO_ARRAY_OP)
+    add_definitions(-DWRITE_TO_ARRAY_OP)
+endif()
+if (READ_FROM_ARRAY_OP)
+    add_definitions(-DREAD_FROM_ARRAY_OP)
+endif()