diff --git a/src/common/types.cpp b/src/common/types.cpp index d1a1a55a89f69a8d6f195e548b864af8d5bd4e64..18d7330a81c65338f3442b8fdf80545198187db6 100644 --- a/src/common/types.cpp +++ b/src/common/types.cpp @@ -77,6 +77,10 @@ const char *G_OP_TYPE_CAST = "cast"; const char *G_OP_TYPE_LOG = "log"; const char *G_OP_TYPE_LOD_RESET = "lod_reset"; const char *G_OP_TYPE_LESS_THAN = "less_than"; +const char *G_OP_TYPE_LOGICAL_AND = "logical_and"; +const char *G_OP_TYPE_LOGICAL_OR = "logical_or"; +const char *G_OP_TYPE_LOGICAL_NOT = "logical_not"; +const char *G_OP_TYPE_LOGICAL_XOR = "logical_xor"; const char *G_OP_TYPE_QUANTIZE = "quantize"; const char *G_OP_TYPE_DEQUANTIZE = "dequantize"; @@ -181,5 +185,9 @@ std::unordered_map< {G_OP_TYPE_NORM, {{"X"}, {"Out", "Norm"}}}, {G_OP_TYPE_LOG, {{"X"}, {"Out"}}}, {G_OP_TYPE_LOD_RESET, {{"X", "Y"}, {"Out"}}}, - {G_OP_TYPE_LESS_THAN, {{"X", "Y"}, {"Out"}}}}; + {G_OP_TYPE_LESS_THAN, {{"X", "Y"}, {"Out"}}}, + {G_OP_TYPE_LOGICAL_AND, {{"X", "Y"}, {"Out"}}}, + {G_OP_TYPE_LOGICAL_OR, {{"X", "Y"}, {"Out"}}}, + {G_OP_TYPE_LOGICAL_XOR, {{"X", "Y"}, {"Out"}}}, + {G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}}}; } // namespace paddle_mobile diff --git a/src/common/types.h b/src/common/types.h index 7e7046dea9e6ebd32380c9ad3f34daabe43dd23d..2927e838fc772a8ccfdcc6212efc09ce8bbbdacd 100644 --- a/src/common/types.h +++ b/src/common/types.h @@ -131,9 +131,12 @@ extern const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU; extern const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU; extern const char *G_OP_TYPE_FUSION_CONV_BN_RELU; +extern const char *G_OP_TYPE_GRU; +extern const char *G_OP_TYPE_GRU_UNIT; extern const char *G_OP_TYPE_LRN; extern const char *G_OP_TYPE_MUL; extern const char *G_OP_TYPE_MULTICLASS_NMS; +extern const char *G_OP_TYPE_NORM; extern const char *G_OP_TYPE_POOL2D; extern const char *G_OP_TYPE_PRIOR_BOX; extern const char *G_OP_TYPE_RELU; @@ -163,6 +166,10 @@ extern const char *G_OP_TYPE_CAST; extern const char *G_OP_TYPE_LOG; extern const char 
*G_OP_TYPE_LOD_RESET; extern const char *G_OP_TYPE_LESS_THAN; +extern const char *G_OP_TYPE_LOGICAL_AND; +extern const char *G_OP_TYPE_LOGICAL_OR; +extern const char *G_OP_TYPE_LOGICAL_NOT; +extern const char *G_OP_TYPE_LOGICAL_XOR; extern const char *G_OP_TYPE_QUANTIZE; extern const char *G_OP_TYPE_DEQUANTIZE; diff --git a/src/framework/load_ops.h b/src/framework/load_ops.h index 1caefe5ae77c9f4328d4d99af9e0b5c3b408d921..fca5fd82a09984ae32f354205e62fda1e3c20423 100644 --- a/src/framework/load_ops.h +++ b/src/framework/load_ops.h @@ -168,6 +168,9 @@ LOAD_FUSION_MATCHER(fusion_conv_bn_relu); #ifdef GRU_OP LOAD_OP1(gru, CPU); #endif +#ifdef GRU_UNIT_OP +LOAD_OP1(gru_unit, CPU); +#endif #ifdef FUSION_CONVADDBN_OP LOAD_OP2(fusion_conv_add_bn, CPU, FPGA); LOAD_FUSION_MATCHER(fusion_conv_add_bn); @@ -189,6 +192,9 @@ LOAD_OP1(crf_decoding, CPU); #ifdef MUL_OP LOAD_OP2(mul, CPU, MALI_GPU); #endif +#ifdef NORM_OP +LOAD_OP1(norm, CPU); +#endif #ifdef RELU_OP LOAD_OP2(relu, CPU, MALI_GPU); LOAD_OP1(relu6, CPU); @@ -279,3 +285,15 @@ LOAD_OP1(lod_reset, CPU); #ifdef LESS_THAN_OP LOAD_OP1(less_than, CPU); #endif +#ifdef LOGICAL_AND_OP +LOAD_OP1(logical_and, CPU); +#endif +#ifdef LOGICAL_OR_OP +LOAD_OP1(logical_or, CPU); +#endif +#ifdef LOGICAL_NOT_OP +LOAD_OP1(logical_not, CPU); +#endif +#ifdef LOGICAL_XOR_OP +LOAD_OP1(logical_xor, CPU); +#endif diff --git a/src/io/ios_io/PaddleMobileCPU.h b/src/io/ios_io/PaddleMobileCPU.h index d38da70247beb0e91b6c69d3c7a1c8bb407c128e..69e8b894d7b16eefa36259b479902e6185e5a36e 100644 --- a/src/io/ios_io/PaddleMobileCPU.h +++ b/src/io/ios_io/PaddleMobileCPU.h @@ -44,6 +44,11 @@ */ @property (assign, nonatomic) BOOL optimize; +/** + @b 是否预测时初始化内存,用于处理可变输入 + */ +@property (assign, nonatomic) BOOL loadWhenPredict; + @end @interface PaddleMobileCPU : NSObject diff --git a/src/io/ios_io/PaddleMobileCPU.mm b/src/io/ios_io/PaddleMobileCPU.mm index de801292e225da4f1d21886bdc919c2a2fdcdd7c..7103dce16b4eeed8b2e63c93f5dbf4b122f06a84 100644 --- 
a/src/io/ios_io/PaddleMobileCPU.mm +++ b/src/io/ios_io/PaddleMobileCPU.mm @@ -73,6 +73,8 @@ static std::mutex shared_mutex; - (instancetype)initWithConfig:(PaddleMobileCPUConfig *)config { if (self = [super init]) { + paddle_mobile::PaddleMobileConfigInternal configInternal; + configInternal.load_when_predict = config.loadWhenPredict; -pam_ = new paddle_mobile::PaddleMobile(); +pam_ = new paddle_mobile::PaddleMobile(configInternal); _config = config; } diff --git a/src/operators/kernel/arm/logical_kernel.cpp b/src/operators/kernel/arm/logical_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3cffcf5c691fde551ad01f42757da8eaae98833e --- /dev/null +++ b/src/operators/kernel/arm/logical_kernel.cpp @@ -0,0 +1,125 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "operators/kernel/logical_kernel.h" + +namespace paddle_mobile { +namespace operators { + +template +struct LogicalAndFunctor { + bool operator()(const T& a, const T& b) const { return a && b; } +}; + +template +struct LogicalOrFunctor { + bool operator()(const T& a, const T& b) const { return a || b; } +}; + +template +struct LogicalNotFunctor { + bool operator()(const T& a) const { return !a; } +}; + +template +struct LogicalXorFunctor { + bool operator()(const T& a, const T& b) const { + return (a || b) && !(a && b); + } +}; + +template +void UnaryLogicalCompute(const Tensor* inputX, Tensor* output) { + Functor func; + std::transform(inputX->data(), inputX->data() + inputX->numel(), + output->data(), func); +} + +template +void BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY, + Tensor* output) { + Functor func; + std::transform(inputX->data(), inputX->data() + inputX->numel(), + inputY->data(), output->data(), func); +} + +#ifdef LOGICAL_AND_OP +template <> +bool LogicalAndKernel::Init(LogicalBinaryParam* param) { + return true; +} + +template <> +void LogicalAndKernel::Compute( + const LogicalBinaryParam& param) { + auto* inputX = param.InputX(); + auto* inputY = param.InputY(); + auto* out = param.Out(); + out->mutable_data(); + BinaryLogicalCompute>(inputX, inputY, out); +} +#endif + +#ifdef LOGICAL_OR_OP +template <> +bool LogicalOrKernel::Init(LogicalBinaryParam* param) { + return true; +} + +template <> +void LogicalOrKernel::Compute( + const LogicalBinaryParam& param) { + auto* inputX = param.InputX(); + auto* inputY = param.InputY(); + auto* out = param.Out(); + out->mutable_data(); + BinaryLogicalCompute>(inputX, inputY, out); +} +#endif + +#ifdef LOGICAL_NOT_OP +template <> +bool LogicalNotKernel::Init(LogicalUnaryParam* param) { + return true; +} + +template <> +void LogicalNotKernel::Compute( + const LogicalUnaryParam& param) { + auto* inputX = param.InputX(); + auto* out = param.Out(); + out->mutable_data(); + 
UnaryLogicalCompute>(inputX, out); +} +#endif + +#ifdef LOGICAL_XOR_OP +template <> +bool LogicalXorKernel::Init(LogicalBinaryParam* param) { + return true; +} + +template <> +void LogicalXorKernel::Compute( + const LogicalBinaryParam& param) { + auto* inputX = param.InputX(); + auto* inputY = param.InputY(); + auto* out = param.Out(); + out->mutable_data(); + BinaryLogicalCompute>(inputX, inputY, out); +} +#endif + +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/kernel/logical_kernel.h b/src/operators/kernel/logical_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..8c49669fa8f276b28e4d3b50db16937f766f70a1 --- /dev/null +++ b/src/operators/kernel/logical_kernel.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "framework/operator.h" +#include "operators/op_param.h" + +namespace paddle_mobile { +namespace operators { + +#ifdef LOGICAL_AND_OP +DECLARE_KERNEL(LogicalAnd, LogicalBinaryParam); +#endif + +#ifdef LOGICAL_OR_OP +DECLARE_KERNEL(LogicalOr, LogicalBinaryParam); +#endif + +#ifdef LOGICAL_NOT_OP +DECLARE_KERNEL(LogicalNot, LogicalUnaryParam); +#endif + +#ifdef LOGICAL_XOR_OP +DECLARE_KERNEL(LogicalXor, LogicalBinaryParam); +#endif + +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/logical_op.cpp b/src/operators/logical_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6478516be088960be8f91e106c21129c8de774f5 --- /dev/null +++ b/src/operators/logical_op.cpp @@ -0,0 +1,69 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "operators/logical_op.h" + +namespace paddle_mobile { +namespace operators { + +#define DEFINE_LOGICAL_INFERSHAPE(OpName) \ + template \ + void OpName##Op::InferShape() const { \ + const auto &input_dims = this->param_.InputX()->dims(); \ + this->param_.Out()->Resize(input_dims); \ + } + +#ifdef LOGICAL_AND_OP +DEFINE_LOGICAL_INFERSHAPE(LogicalAnd); +#endif // LOGICAL_AND_OP + +#ifdef LOGICAL_OR_OP +DEFINE_LOGICAL_INFERSHAPE(LogicalOr); +#endif // LOGICAL_OR_OP + +#ifdef LOGICAL_NOT_OP +DEFINE_LOGICAL_INFERSHAPE(LogicalNot); +#endif // LOGICAL_NOT_OP + +#ifdef LOGICAL_XOR_OP +DEFINE_LOGICAL_INFERSHAPE(LogicalXor); +#endif // LOGICAL_XOR_OP + +} // namespace operators +} // namespace paddle_mobile + +namespace ops = paddle_mobile::operators; +#ifdef LOGICAL_AND_OP +#ifdef PADDLE_MOBILE_CPU +REGISTER_OPERATOR_CPU(logical_and, ops::LogicalAndOp); +#endif +#endif // LOGICAL_AND_OP + +#ifdef LOGICAL_OR_OP +#ifdef PADDLE_MOBILE_CPU +REGISTER_OPERATOR_CPU(logical_or, ops::LogicalOrOp); +#endif +#endif // LOGICAL_OR_OP + +#ifdef LOGICAL_NOT_OP +#ifdef PADDLE_MOBILE_CPU +REGISTER_OPERATOR_CPU(logical_not, ops::LogicalNotOp); +#endif +#endif // LOGICAL_NOT_OP + +#ifdef LOGICAL_XOR_OP +#ifdef PADDLE_MOBILE_CPU +REGISTER_OPERATOR_CPU(logical_xor, ops::LogicalXorOp); +#endif +#endif // LOGICAL_XOR_OP diff --git a/src/operators/logical_op.h b/src/operators/logical_op.h new file mode 100644 index 0000000000000000000000000000000000000000..a3cd2fb605f3f081e06eee740d9c47873a29ca97 --- /dev/null +++ b/src/operators/logical_op.h @@ -0,0 +1,42 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "framework/operator.h" +#include "operators/kernel/logical_kernel.h" +#include "operators/op_param.h" + +namespace paddle_mobile { +namespace operators { + +#ifdef LOGICAL_AND_OP +DECLARE_OPERATOR(LogicalAnd, LogicalBinaryParam, LogicalAndKernel); +#endif + +#ifdef LOGICAL_OR_OP +DECLARE_OPERATOR(LogicalOr, LogicalBinaryParam, LogicalOrKernel); +#endif + +#ifdef LOGICAL_NOT_OP +DECLARE_OPERATOR(LogicalNot, LogicalUnaryParam, LogicalNotKernel); +#endif + +#ifdef LOGICAL_XOR_OP +DECLARE_OPERATOR(LogicalXor, LogicalBinaryParam, LogicalXorKernel); +#endif + +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/op_param.h b/src/operators/op_param.h index ed036ab7c2a191a0924e3c2d6c6ad61e6de79bd4..1f20f5505668e79a8453d9b14ef25478fc7e3af7 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -2942,5 +2942,54 @@ class CompareParam : public OpParam { }; #endif // LESS_THAN_OP +#if defined(LOGICAL_AND_OP) || defined(LOGICAL_OR_OP) || defined(LOGICAL_XOR_OP) +template +class LogicalBinaryParam : public OpParam { + typedef typename DtypeTensorTrait::gtype GType; + typedef typename DtypeTensorTrait::rtype RType; + + public: + LogicalBinaryParam(const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs, + const Scope &scope) { + input_x_ = InputXFrom(inputs, scope); + input_y_ = InputYFrom(inputs, scope); + output_ = OutFrom(outputs, scope); + } + + const GType *InputX() const { return input_x_; } + const GType *InputY() const { return input_y_; } + GType *Out() const 
{ return output_; } + + public: + GType *input_x_; + GType *input_y_; + GType *output_; +}; +#endif // LOGICAL_AND_OP LOGICAL_OR_OP LOGICAL_XOR_OP + +#ifdef LOGICAL_NOT_OP +template +class LogicalUnaryParam : public OpParam { + typedef typename DtypeTensorTrait::gtype GType; + typedef typename DtypeTensorTrait::rtype RType; + + public: + LogicalUnaryParam(const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs, + const Scope &scope) { + input_x_ = InputXFrom(inputs, scope); + output_ = OutFrom(outputs, scope); + } + + const GType *InputX() const { return input_x_; } + GType *Out() const { return output_; } + + public: + GType *input_x_; + GType *output_; +}; +#endif // LOGICAL_NOT_OP + } // namespace operators } // namespace paddle_mobile diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 3128c003e2ef367b625ee03ea562f686eef9324e..23634f33fe01dbfbc994f48a522c30c966fc7087 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -421,4 +421,20 @@ if (NOT FOUND_MATCH) ADD_EXECUTABLE(test-vgg16ssd net/test_vgg16ssd.cpp test_helper.h test_include.h) target_link_libraries(test-vgg16ssd paddle-mobile) + # gen test + ADD_EXECUTABLE(test-logical-and-op operators/test_logical_and_op.cpp test_helper.h test_include.h) + target_link_libraries(test-logical-and-op paddle-mobile) + + # gen test + ADD_EXECUTABLE(test-logical-or-op operators/test_logical_or_op.cpp test_helper.h test_include.h) + target_link_libraries(test-logical-or-op paddle-mobile) + + # gen test + ADD_EXECUTABLE(test-logical-not-op operators/test_logical_not_op.cpp test_helper.h test_include.h) + target_link_libraries(test-logical-not-op paddle-mobile) + + # gen test + ADD_EXECUTABLE(test-logical-xor-op operators/test_logical_xor_op.cpp test_helper.h test_include.h) + target_link_libraries(test-logical-xor-op paddle-mobile) + endif () diff --git a/test/operators/test_logical_and_op.cpp b/test/operators/test_logical_and_op.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..216513cf3d7f64c865bf0931abe6a9dad2d2582d --- /dev/null +++ b/test/operators/test_logical_and_op.cpp @@ -0,0 +1,84 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "../test_include.h" +#include "operators/logical_op.h" + +namespace paddle_mobile { + +void LogicalAnd(const framework::Tensor *inputX, + const framework::Tensor *inputY, framework::Tensor *output) { + auto x_data = inputX->data(); + auto y_data = inputY->data(); + auto output_data = output->data(); + for (int i = 0; i < inputX->numel(); ++i) { + *output_data = *x_data && *y_data; + x_data++; + y_data++; + output_data++; + } +} + +int TestLogicalAndOp(const std::vector input_shape) { + framework::DDim input_dims = framework::make_ddim(input_shape); + VariableNameMap inputs; + VariableNameMap outputs; + auto scope = std::make_shared(); + inputs["X"] = std::vector({"inputX"}); + inputs["Y"] = std::vector({"inputY"}); + outputs["Out"] = std::vector({"output"}); + + auto x_var = scope.get()->Var("inputX"); + auto x = x_var->template GetMutable(); + SetupTensor(x, input_dims, 0, 1); + + auto y_var = scope.get()->Var("inputY"); + auto y = y_var->template GetMutable(); + SetupTensor(y, input_dims, 0, 1); + + auto output_var = scope.get()->Var("output"); + framework::AttributeMap attrs; + + auto *op = new operators::LogicalAndOp("logical_and", inputs, + outputs, attrs, scope); + + op->InferShape(); + op->Init(); + 
op->Run(); + + auto output = output_var->template Get(); + framework::Tensor output_cmp; + bool *output_cmp_data = output_cmp.mutable_data(output->dims()); + LogicalAnd(x, y, &output_cmp); + + const bool *output_data = output->data(); + for (int i = 0; i < output->numel(); ++i) { + if (output_data[i] != output_cmp_data[i]) { + LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i] + << ", output_cmp_data[" << i + << "] = " << output_cmp_data[i]; + delete op; + exit(1); + } + } +} +} // namespace paddle_mobile + +int main() { + paddle_mobile::TestLogicalAndOp({1, 1, 2, 3}); + paddle_mobile::TestLogicalAndOp({1, 3, 11, 12}); + paddle_mobile::TestLogicalAndOp({1, 16, 32, 32}); + DLOG << "test logical_and op pass."; + return 0; +} diff --git a/test/operators/test_logical_not_op.cpp b/test/operators/test_logical_not_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..55d48f79b72a05a74b6d13e4095f25ddfb4e8cbd --- /dev/null +++ b/test/operators/test_logical_not_op.cpp @@ -0,0 +1,76 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "../test_include.h" +#include "operators/logical_op.h" + +namespace paddle_mobile { + +void LogicalNot(const framework::Tensor *inputX, framework::Tensor *output) { + auto x_data = inputX->data(); + auto output_data = output->data(); + for (int i = 0; i < inputX->numel(); ++i) { + *output_data = !*x_data; + x_data++; + output_data++; + } +} + +int TestLogicalNotOp(const std::vector input_shape) { + framework::DDim input_dims = framework::make_ddim(input_shape); + VariableNameMap inputs; + VariableNameMap outputs; + auto scope = std::make_shared(); + inputs["X"] = std::vector({"inputX"}); + outputs["Out"] = std::vector({"output"}); + + auto x_var = scope.get()->Var("inputX"); + auto x = x_var->template GetMutable(); + SetupTensor(x, input_dims, 0, 1); + + auto output_var = scope.get()->Var("output"); + framework::AttributeMap attrs; + + auto *op = new operators::LogicalNotOp("logical_not", inputs, + outputs, attrs, scope); + + op->InferShape(); + op->Init(); + op->Run(); + + auto output = output_var->template Get(); + framework::Tensor output_cmp; + bool *output_cmp_data = output_cmp.mutable_data(output->dims()); + LogicalNot(x, &output_cmp); + + const bool *output_data = output->data(); + for (int i = 0; i < output->numel(); ++i) { + if (output_data[i] != output_cmp_data[i]) { + LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i] + << ", output_cmp_data[" << i + << "] = " << output_cmp_data[i]; + delete op; + exit(1); + } + } +} +} // namespace paddle_mobile + +int main() { + paddle_mobile::TestLogicalNotOp({1, 1, 2, 3}); + paddle_mobile::TestLogicalNotOp({1, 3, 11, 12}); + paddle_mobile::TestLogicalNotOp({1, 16, 32, 32}); + DLOG << "test logical_not op pass."; + return 0; +} diff --git a/test/operators/test_logical_or_op.cpp b/test/operators/test_logical_or_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..593ee35e696ebc392496846d8beb244210d1ec88 --- /dev/null +++ b/test/operators/test_logical_or_op.cpp @@ 
-0,0 +1,84 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "../test_include.h" +#include "operators/logical_op.h" + +namespace paddle_mobile { + +void LogicalOr(const framework::Tensor *inputX, const framework::Tensor *inputY, + framework::Tensor *output) { + auto x_data = inputX->data(); + auto y_data = inputY->data(); + auto output_data = output->data(); + for (int i = 0; i < inputX->numel(); ++i) { + *output_data = *x_data || *y_data; + x_data++; + y_data++; + output_data++; + } +} + +int TestLogicalOrOp(const std::vector input_shape) { + framework::DDim input_dims = framework::make_ddim(input_shape); + VariableNameMap inputs; + VariableNameMap outputs; + auto scope = std::make_shared(); + inputs["X"] = std::vector({"inputX"}); + inputs["Y"] = std::vector({"inputY"}); + outputs["Out"] = std::vector({"output"}); + + auto x_var = scope.get()->Var("inputX"); + auto x = x_var->template GetMutable(); + SetupTensor(x, input_dims, 0, 1); + + auto y_var = scope.get()->Var("inputY"); + auto y = y_var->template GetMutable(); + SetupTensor(y, input_dims, 0, 1); + + auto output_var = scope.get()->Var("output"); + framework::AttributeMap attrs; + + auto *op = new operators::LogicalOrOp("logical_or", inputs, + outputs, attrs, scope); + + op->InferShape(); + op->Init(); + op->Run(); + + auto output = output_var->template Get(); + framework::Tensor output_cmp; + bool *output_cmp_data = 
output_cmp.mutable_data(output->dims()); + LogicalOr(x, y, &output_cmp); + + const bool *output_data = output->data(); + for (int i = 0; i < output->numel(); ++i) { + if (output_data[i] != output_cmp_data[i]) { + LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i] + << ", output_cmp_data[" << i + << "] = " << output_cmp_data[i]; + delete op; + exit(1); + } + } +} +} // namespace paddle_mobile + +int main() { + paddle_mobile::TestLogicalOrOp({1, 1, 2, 3}); + paddle_mobile::TestLogicalOrOp({1, 3, 11, 12}); + paddle_mobile::TestLogicalOrOp({1, 16, 32, 32}); + DLOG << "test logical_or op pass."; + return 0; +} diff --git a/test/operators/test_logical_xor_op.cpp b/test/operators/test_logical_xor_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b4ca4c826727a6493ce785f4fab97a4dfa809557 --- /dev/null +++ b/test/operators/test_logical_xor_op.cpp @@ -0,0 +1,86 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "../test_include.h" +#include "operators/logical_op.h" + +namespace paddle_mobile { + +void LogicalXor(const framework::Tensor *inputX, + const framework::Tensor *inputY, framework::Tensor *output) { + auto x_data = inputX->data(); + auto y_data = inputY->data(); + auto output_data = output->data(); + for (int i = 0; i < inputX->numel(); ++i) { + bool x = *x_data; + bool y = *y_data; + *output_data = (x || y) && !(x && y); + x_data++; + y_data++; + output_data++; + } +} + +int TestLogicalXorOp(const std::vector input_shape) { + framework::DDim input_dims = framework::make_ddim(input_shape); + VariableNameMap inputs; + VariableNameMap outputs; + auto scope = std::make_shared(); + inputs["X"] = std::vector({"inputX"}); + inputs["Y"] = std::vector({"inputY"}); + outputs["Out"] = std::vector({"output"}); + + auto x_var = scope.get()->Var("inputX"); + auto x = x_var->template GetMutable(); + SetupTensor(x, input_dims, 0, 1); + + auto y_var = scope.get()->Var("inputY"); + auto y = y_var->template GetMutable(); + SetupTensor(y, input_dims, 0, 1); + + auto output_var = scope.get()->Var("output"); + framework::AttributeMap attrs; + + auto *op = new operators::LogicalXorOp("logical_xor", inputs, + outputs, attrs, scope); + + op->InferShape(); + op->Init(); + op->Run(); + + auto output = output_var->template Get(); + framework::Tensor output_cmp; + bool *output_cmp_data = output_cmp.mutable_data(output->dims()); + LogicalXor(x, y, &output_cmp); + + const bool *output_data = output->data(); + for (int i = 0; i < output->numel(); ++i) { + if (output_data[i] != output_cmp_data[i]) { + LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i] + << ", output_cmp_data[" << i + << "] = " << output_cmp_data[i]; + delete op; + exit(1); + } + } +} +} // namespace paddle_mobile + +int main() { + paddle_mobile::TestLogicalXorOp({1, 1, 2, 3}); + paddle_mobile::TestLogicalXorOp({1, 3, 11, 12}); + paddle_mobile::TestLogicalXorOp({1, 16, 32, 32}); + DLOG << "test 
logical_xor op pass."; + return 0; +} diff --git a/test/test_helper.h b/test/test_helper.h index a760fa7ff05a528be9109b6589c625cce208e9c7..652283494f48b1cc1c1b87a791a5111bd9386fd4 100644 --- a/test/test_helper.h +++ b/test/test_helper.h @@ -83,6 +83,26 @@ void SetupTensor(paddle_mobile::framework::Tensor *input, } } +template <> +void SetupTensor(paddle_mobile::framework::Tensor *input, + paddle_mobile::framework::DDim dims, bool lower, + bool upper) { + static unsigned int seed = 100; + std::mt19937 rng(seed++); + std::uniform_real_distribution uniform_dist(0, 1); + + bool *input_ptr = input->mutable_data(dims); + if (lower == upper) { + for (int i = 0; i < input->numel(); ++i) { + input_ptr[i] = lower; + } + } else { + for (int i = 0; i < input->numel(); ++i) { + input_ptr[i] = uniform_dist(rng) > 0.5; + } + } +} + template T *CreateInput(Tensor *input, DDim dims, T low, T up) { SetupTensor(input, dims, static_cast(low), static_cast(up)); diff --git a/tools/op.cmake b/tools/op.cmake index b14dfdacf6051d6edc57934cc25841f346e9d0df..cf68f2449113589fe99e2278a7d4ba5b62e19c13 100644 --- a/tools/op.cmake +++ b/tools/op.cmake @@ -281,6 +281,10 @@ if(NOT FOUND_MATCH) set(TANH_OP ON) set(LOD_RESET_OP ON) set(LESS_THAN_OP ON) + set(LOGICAL_AND_OP ON) + set(LOGICAL_OR_OP ON) + set(LOGICAL_NOT_OP ON) + set(LOGICAL_XOR_OP ON) endif() # option(BATCHNORM_OP "" ON) @@ -530,6 +534,18 @@ endif() if (LESS_THAN_OP) add_definitions(-DLESS_THAN_OP) endif() +if (LOGICAL_AND_OP) + add_definitions(-DLOGICAL_AND_OP) +endif() +if (LOGICAL_OR_OP) + add_definitions(-DLOGICAL_OR_OP) +endif() +if (LOGICAL_NOT_OP) + add_definitions(-DLOGICAL_NOT_OP) +endif() +if (LOGICAL_XOR_OP) + add_definitions(-DLOGICAL_XOR_OP) +endif() if (TANH_OP) add_definitions(-DTANH_OP)