提交 45d169f1 编写于 作者: xiebaiyuan's avatar xiebaiyuan 提交者: GitHub

Merge branch 'develop' into ci_build

......@@ -77,6 +77,10 @@ const char *G_OP_TYPE_CAST = "cast";
const char *G_OP_TYPE_LOG = "log";
const char *G_OP_TYPE_LOD_RESET = "lod_reset";
const char *G_OP_TYPE_LESS_THAN = "less_than";
const char *G_OP_TYPE_LOGICAL_AND = "logical_and";
const char *G_OP_TYPE_LOGICAL_OR = "logical_or";
const char *G_OP_TYPE_LOGICAL_NOT = "logical_not";
const char *G_OP_TYPE_LOGICAL_XOR = "logical_xor";
const char *G_OP_TYPE_QUANTIZE = "quantize";
const char *G_OP_TYPE_DEQUANTIZE = "dequantize";
......@@ -181,5 +185,9 @@ std::unordered_map<
{G_OP_TYPE_NORM, {{"X"}, {"Out", "Norm"}}},
{G_OP_TYPE_LOG, {{"X"}, {"Out"}}},
{G_OP_TYPE_LOD_RESET, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LESS_THAN, {{"X", "Y"}, {"Out"}}}};
{G_OP_TYPE_LESS_THAN, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LOGICAL_AND, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LOGICAL_OR, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LOGICAL_XOR, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}}};
} // namespace paddle_mobile
......@@ -131,9 +131,12 @@ extern const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU;
extern const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU;
extern const char *G_OP_TYPE_FUSION_CONV_BN_RELU;
extern const char *G_OP_TYPE_GRU;
extern const char *G_OP_TYPE_GRU_UNIT;
extern const char *G_OP_TYPE_LRN;
extern const char *G_OP_TYPE_MUL;
extern const char *G_OP_TYPE_MULTICLASS_NMS;
extern const char *G_OP_TYPE_NORM;
extern const char *G_OP_TYPE_POOL2D;
extern const char *G_OP_TYPE_PRIOR_BOX;
extern const char *G_OP_TYPE_RELU;
......@@ -163,6 +166,10 @@ extern const char *G_OP_TYPE_CAST;
extern const char *G_OP_TYPE_LOG;
extern const char *G_OP_TYPE_LOD_RESET;
extern const char *G_OP_TYPE_LESS_THAN;
extern const char *G_OP_TYPE_LOGICAL_AND;
extern const char *G_OP_TYPE_LOGICAL_OR;
extern const char *G_OP_TYPE_LOGICAL_NOT;
extern const char *G_OP_TYPE_LOGICAL_XOR;
extern const char *G_OP_TYPE_QUANTIZE;
extern const char *G_OP_TYPE_DEQUANTIZE;
......
......@@ -168,6 +168,9 @@ LOAD_FUSION_MATCHER(fusion_conv_bn_relu);
#ifdef GRU_OP
LOAD_OP1(gru, CPU);
#endif
#ifdef GRU_UNIT_OP
LOAD_OP1(gru_unit, CPU);
#endif
#ifdef FUSION_CONVADDBN_OP
LOAD_OP2(fusion_conv_add_bn, CPU, FPGA);
LOAD_FUSION_MATCHER(fusion_conv_add_bn);
......@@ -189,6 +192,9 @@ LOAD_OP1(crf_decoding, CPU);
#ifdef MUL_OP
LOAD_OP2(mul, CPU, MALI_GPU);
#endif
#ifdef NORM_OP
LOAD_OP1(norm, CPU);
#endif
#ifdef RELU_OP
LOAD_OP2(relu, CPU, MALI_GPU);
LOAD_OP1(relu6, CPU);
......@@ -279,3 +285,15 @@ LOAD_OP1(lod_reset, CPU);
#ifdef LESS_THAN_OP
LOAD_OP1(less_than, CPU);
#endif
// Register the element-wise logical ops for the CPU backend; each is
// compiled in only when its *_OP symbol is defined (see CMakeLists).
#ifdef LOGICAL_AND_OP
LOAD_OP1(logical_and, CPU);
#endif
#ifdef LOGICAL_OR_OP
LOAD_OP1(logical_or, CPU);
#endif
#ifdef LOGICAL_NOT_OP
LOAD_OP1(logical_not, CPU);
#endif
#ifdef LOGICAL_XOR_OP
LOAD_OP1(logical_xor, CPU);
#endif
......@@ -44,6 +44,11 @@
*/
@property (assign, nonatomic) BOOL optimize;
/**
 @b Whether memory is (re)initialized at prediction time; used to support variable-sized inputs.
 */
@property (assign, nonatomic) BOOL loadWhenPredict;
@end
@interface PaddleMobileCPU : NSObject
......
......@@ -73,6 +73,8 @@ static std::mutex shared_mutex;
- (instancetype)initWithConfig:(PaddleMobileCPUConfig *)config {
if (self = [super init]) {
paddle_mobile::PaddleMobileConfigInternal configInternal;
configInternal.load_when_predict = config.loadWhenPredict;
pam_ = new paddle_mobile::PaddleMobile<paddle_mobile::CPU, float>();
_config = config;
}
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/kernel/logical_kernel.h"
namespace paddle_mobile {
namespace operators {
// Element-wise logical AND: true only when both operands are truthy.
template <typename T>
struct LogicalAndFunctor {
  bool operator()(const T& a, const T& b) const {
    return static_cast<bool>(a) && static_cast<bool>(b);
  }
};
// Element-wise logical OR: true when at least one operand is truthy.
template <typename T>
struct LogicalOrFunctor {
  bool operator()(const T& a, const T& b) const {
    return static_cast<bool>(a) || static_cast<bool>(b);
  }
};
// Element-wise logical NOT: true exactly when the operand is falsy.
template <typename T>
struct LogicalNotFunctor {
  bool operator()(const T& a) const { return !static_cast<bool>(a); }
};
// Element-wise logical XOR: true when exactly one operand is truthy,
// i.e. when the operands' truth values differ.
template <typename T>
struct LogicalXorFunctor {
  bool operator()(const T& a, const T& b) const {
    return static_cast<bool>(a) != static_cast<bool>(b);
  }
};
// Applies Functor to each element of inputX, writing the results into
// output. output must already hold a buffer of inputX->numel() T's
// (callers invoke mutable_data<bool>() beforehand).
template <typename T, typename Functor>
void UnaryLogicalCompute(const Tensor* inputX, Tensor* output) {
  Functor func;
  const T* in = inputX->data<T>();
  const T* in_end = in + inputX->numel();
  T* out = output->data<T>();
  while (in != in_end) {
    *out++ = func(*in++);
  }
}
// Applies Functor pairwise over inputX and inputY, writing into output.
// Only inputX->numel() drives the iteration; inputY and output must each
// provide at least that many elements of T.
template <typename T, typename Functor>
void BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY,
                          Tensor* output) {
  Functor func;
  const T* x = inputX->data<T>();
  const T* x_end = x + inputX->numel();
  const T* y = inputY->data<T>();
  T* out = output->data<T>();
  while (x != x_end) {
    *out++ = func(*x++, *y++);
  }
}
#ifdef LOGICAL_AND_OP
// No one-time setup is needed for this kernel.
template <>
bool LogicalAndKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
  return true;
}
// Computes Out = X && Y element-wise as bool.
// NOTE(review): only X's numel drives the loop; assumes Y has the same
// element count — confirm against InferShape (which copies X's dims).
template <>
void LogicalAndKernel<CPU, float>::Compute(
    const LogicalBinaryParam<CPU>& param) {
  auto* inputX = param.InputX();
  auto* inputY = param.InputY();
  auto* out = param.Out();
  out->mutable_data<bool>();  // allocate the output buffer before writing
  BinaryLogicalCompute<bool, LogicalAndFunctor<bool>>(inputX, inputY, out);
}
#endif
#ifdef LOGICAL_OR_OP
// No one-time setup is needed for this kernel.
template <>
bool LogicalOrKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
  return true;
}
// Computes Out = X || Y element-wise as bool.
// NOTE(review): only X's numel drives the loop; assumes Y matches in size.
template <>
void LogicalOrKernel<CPU, float>::Compute(
    const LogicalBinaryParam<CPU>& param) {
  auto* inputX = param.InputX();
  auto* inputY = param.InputY();
  auto* out = param.Out();
  out->mutable_data<bool>();  // allocate the output buffer before writing
  BinaryLogicalCompute<bool, LogicalOrFunctor<bool>>(inputX, inputY, out);
}
#endif
#ifdef LOGICAL_NOT_OP
// No one-time setup is needed for this kernel.
template <>
bool LogicalNotKernel<CPU, float>::Init(LogicalUnaryParam<CPU>* param) {
  return true;
}
// Computes Out = !X element-wise as bool.
template <>
void LogicalNotKernel<CPU, float>::Compute(
    const LogicalUnaryParam<CPU>& param) {
  auto* inputX = param.InputX();
  auto* out = param.Out();
  out->mutable_data<bool>();  // allocate the output buffer before writing
  UnaryLogicalCompute<bool, LogicalNotFunctor<bool>>(inputX, out);
}
#endif
#ifdef LOGICAL_XOR_OP
// No one-time setup is needed for this kernel.
template <>
bool LogicalXorKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
  return true;
}
// Computes Out = X XOR Y element-wise as bool (exactly one operand true).
// NOTE(review): only X's numel drives the loop; assumes Y matches in size.
template <>
void LogicalXorKernel<CPU, float>::Compute(
    const LogicalBinaryParam<CPU>& param) {
  auto* inputX = param.InputX();
  auto* inputY = param.InputY();
  auto* out = param.Out();
  out->mutable_data<bool>();  // allocate the output buffer before writing
  BinaryLogicalCompute<bool, LogicalXorFunctor<bool>>(inputX, inputY, out);
}
#endif
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
// CPU kernel declarations for the element-wise logical ops. Each kernel is
// compiled in only when the corresponding *_OP build flag is defined.
#ifdef LOGICAL_AND_OP
DECLARE_KERNEL(LogicalAnd, LogicalBinaryParam);
#endif
#ifdef LOGICAL_OR_OP
DECLARE_KERNEL(LogicalOr, LogicalBinaryParam);
#endif
#ifdef LOGICAL_NOT_OP
DECLARE_KERNEL(LogicalNot, LogicalUnaryParam);
#endif
#ifdef LOGICAL_XOR_OP
DECLARE_KERNEL(LogicalXor, LogicalBinaryParam);
#endif
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/logical_op.h"
namespace paddle_mobile {
namespace operators {
// Generates InferShape() for a logical op: the output tensor is resized to
// input X's dims. NOTE(review): for the binary ops this implies X and Y
// must share a shape (no broadcasting) — confirm with op callers.
#define DEFINE_LOGICAL_INFERSHAPE(OpName)                    \
  template <typename Dtype, typename T>                      \
  void OpName##Op<Dtype, T>::InferShape() const {            \
    const auto &input_dims = this->param_.InputX()->dims();  \
    this->param_.Out()->Resize(input_dims);                  \
  }
// Instantiate InferShape for each enabled logical op.
#ifdef LOGICAL_AND_OP
DEFINE_LOGICAL_INFERSHAPE(LogicalAnd);
#endif  // LOGICAL_AND_OP
#ifdef LOGICAL_OR_OP
DEFINE_LOGICAL_INFERSHAPE(LogicalOr);
#endif  // LOGICAL_OR_OP
#ifdef LOGICAL_NOT_OP
DEFINE_LOGICAL_INFERSHAPE(LogicalNot);
#endif  // LOGICAL_NOT_OP
#ifdef LOGICAL_XOR_OP
DEFINE_LOGICAL_INFERSHAPE(LogicalXor);
#endif  // LOGICAL_XOR_OP
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
// Register each logical op with the CPU backend when it is enabled at
// build time (no MALI_GPU/FPGA variants are provided).
#ifdef LOGICAL_AND_OP
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(logical_and, ops::LogicalAndOp);
#endif
#endif  // LOGICAL_AND_OP
#ifdef LOGICAL_OR_OP
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(logical_or, ops::LogicalOrOp);
#endif
#endif  // LOGICAL_OR_OP
#ifdef LOGICAL_NOT_OP
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(logical_not, ops::LogicalNotOp);
#endif
#endif  // LOGICAL_NOT_OP
#ifdef LOGICAL_XOR_OP
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(logical_xor, ops::LogicalXorOp);
#endif
#endif  // LOGICAL_XOR_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/logical_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
// Operator declarations tying each logical op to its param type and kernel.
#ifdef LOGICAL_AND_OP
DECLARE_OPERATOR(LogicalAnd, LogicalBinaryParam, LogicalAndKernel);
#endif
#ifdef LOGICAL_OR_OP
DECLARE_OPERATOR(LogicalOr, LogicalBinaryParam, LogicalOrKernel);
#endif
#ifdef LOGICAL_NOT_OP
DECLARE_OPERATOR(LogicalNot, LogicalUnaryParam, LogicalNotKernel);
#endif
#ifdef LOGICAL_XOR_OP
DECLARE_OPERATOR(LogicalXor, LogicalBinaryParam, LogicalXorKernel);
#endif
} // namespace operators
} // namespace paddle_mobile
......@@ -2942,5 +2942,54 @@ class CompareParam : public OpParam {
};
#endif // LESS_THAN_OP
#if defined(LOGICAL_AND_OP) || defined(LOGICAL_OR_OP) || defined(LOGICAL_XOR_OP)
// Parameter bundle for the binary logical ops (and/or/xor): resolves the
// variables named "X", "Y" and "Out" from the scope at construction time.
template <typename Dtype>
class LogicalBinaryParam : public OpParam {
  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
  // NOTE(review): RType is declared but never used in this class.
  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
 public:
  // `attrs` is accepted for interface uniformity but not read here.
  LogicalBinaryParam(const VariableNameMap &inputs,
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     const Scope &scope) {
    input_x_ = InputXFrom<GType>(inputs, scope);
    input_y_ = InputYFrom<GType>(inputs, scope);
    output_ = OutFrom<GType>(outputs, scope);
  }
  const GType *InputX() const { return input_x_; }
  const GType *InputY() const { return input_y_; }
  GType *Out() const { return output_; }
 public:
  // NOTE(review): trailing-underscore members exposed as public — consider
  // making these private; accessors above already cover read access.
  GType *input_x_;
  GType *input_y_;
  GType *output_;
};
#endif  // LOGICAL_AND_OP LOGICAL_OR_OP LOGICAL_XOR_OP
#ifdef LOGICAL_NOT_OP
// Parameter bundle for the unary logical op (not): resolves the variables
// named "X" and "Out" from the scope at construction time.
template <typename Dtype>
class LogicalUnaryParam : public OpParam {
  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
  // NOTE(review): RType is declared but never used in this class.
  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
 public:
  // `attrs` is accepted for interface uniformity but not read here.
  LogicalUnaryParam(const VariableNameMap &inputs,
                    const VariableNameMap &outputs, const AttributeMap &attrs,
                    const Scope &scope) {
    input_x_ = InputXFrom<GType>(inputs, scope);
    output_ = OutFrom<GType>(outputs, scope);
  }
  const GType *InputX() const { return input_x_; }
  GType *Out() const { return output_; }
 public:
  // NOTE(review): trailing-underscore members exposed as public — consider
  // making these private; accessors above already cover read access.
  GType *input_x_;
  GType *output_;
};
#endif  // LOGICAL_NOT_OP
} // namespace operators
} // namespace paddle_mobile
......@@ -421,4 +421,20 @@ if (NOT FOUND_MATCH)
ADD_EXECUTABLE(test-vgg16ssd net/test_vgg16ssd.cpp test_helper.h test_include.h)
target_link_libraries(test-vgg16ssd paddle-mobile)
# gen test
ADD_EXECUTABLE(test-logical-and-op operators/test_logical_and_op.cpp test_helper.h test_include.h)
target_link_libraries(test-logical-and-op paddle-mobile)
# gen test
ADD_EXECUTABLE(test-logical-or-op operators/test_logical_or_op.cpp test_helper.h test_include.h)
target_link_libraries(test-logical-or-op paddle-mobile)
# gen test
ADD_EXECUTABLE(test-logical-not-op operators/test_logical_not_op.cpp test_helper.h test_include.h)
target_link_libraries(test-logical-not-op paddle-mobile)
# gen test
ADD_EXECUTABLE(test-logical-xor-op operators/test_logical_xor_op.cpp test_helper.h test_include.h)
target_link_libraries(test-logical-xor-op paddle-mobile)
endif ()
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../test_include.h"
#include "operators/logical_op.h"
namespace paddle_mobile {
// Reference implementation: out[i] = x[i] && y[i] over inputX->numel()
// elements. Assumes inputY and output match inputX's element count.
void LogicalAnd(const framework::Tensor *inputX,
                const framework::Tensor *inputY, framework::Tensor *output) {
  const bool *x = inputX->data<bool>();
  const bool *y = inputY->data<bool>();
  bool *out = output->data<bool>();
  for (int i = 0; i < inputX->numel(); ++i) {
    out[i] = x[i] && y[i];
  }
}
// Builds a scope with two random bool tensors of `input_shape`, runs
// LogicalAndOp, and checks the op's output against the reference
// LogicalAnd(). Exits the process on mismatch; returns 0 on success.
int TestLogicalAndOp(const std::vector<int> input_shape) {
  framework::DDim input_dims = framework::make_ddim(input_shape);
  VariableNameMap inputs;
  VariableNameMap outputs;
  auto scope = std::make_shared<framework::Scope>();
  inputs["X"] = std::vector<std::string>({"inputX"});
  inputs["Y"] = std::vector<std::string>({"inputY"});
  outputs["Out"] = std::vector<std::string>({"output"});

  auto x_var = scope.get()->Var("inputX");
  auto x = x_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(x, input_dims, 0, 1);

  auto y_var = scope.get()->Var("inputY");
  auto y = y_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(y, input_dims, 0, 1);

  auto output_var = scope.get()->Var("output");
  framework::AttributeMap attrs;
  auto *op = new operators::LogicalAndOp<CPU, float>("logical_and", inputs,
                                                     outputs, attrs, scope);
  op->InferShape();
  op->Init();
  op->Run();

  auto output = output_var->template Get<framework::LoDTensor>();
  framework::Tensor output_cmp;
  bool *output_cmp_data = output_cmp.mutable_data<bool>(output->dims());
  LogicalAnd(x, y, &output_cmp);

  const bool *output_data = output->data<bool>();
  for (int i = 0; i < output->numel(); ++i) {
    if (output_data[i] != output_cmp_data[i]) {
      LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
                     << ", output_cmp_data[" << i
                     << "] = " << output_cmp_data[i];
      delete op;
      exit(1);
    }
  }
  delete op;  // fix: op leaked on the success path
  return 0;   // fix: function declared int but had no return (UB)
}
} // namespace paddle_mobile
int main() {
paddle_mobile::TestLogicalAndOp({1, 1, 2, 3});
paddle_mobile::TestLogicalAndOp({1, 3, 11, 12});
paddle_mobile::TestLogicalAndOp({1, 16, 32, 32});
DLOG << "test logical_and op pass.";
return 0;
}
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../test_include.h"
#include "operators/logical_op.h"
namespace paddle_mobile {
// Reference implementation: out[i] = !x[i] over inputX->numel() elements.
// Assumes output matches inputX's element count.
void LogicalNot(const framework::Tensor *inputX, framework::Tensor *output) {
  const bool *x = inputX->data<bool>();
  bool *out = output->data<bool>();
  for (int i = 0; i < inputX->numel(); ++i) {
    out[i] = !x[i];
  }
}
// Builds a scope with a random bool tensor of `input_shape`, runs
// LogicalNotOp, and checks the op's output against the reference
// LogicalNot(). Exits the process on mismatch; returns 0 on success.
int TestLogicalNotOp(const std::vector<int> input_shape) {
  framework::DDim input_dims = framework::make_ddim(input_shape);
  VariableNameMap inputs;
  VariableNameMap outputs;
  auto scope = std::make_shared<framework::Scope>();
  inputs["X"] = std::vector<std::string>({"inputX"});
  outputs["Out"] = std::vector<std::string>({"output"});

  auto x_var = scope.get()->Var("inputX");
  auto x = x_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(x, input_dims, 0, 1);

  auto output_var = scope.get()->Var("output");
  framework::AttributeMap attrs;
  auto *op = new operators::LogicalNotOp<CPU, float>("logical_not", inputs,
                                                     outputs, attrs, scope);
  op->InferShape();
  op->Init();
  op->Run();

  auto output = output_var->template Get<framework::LoDTensor>();
  framework::Tensor output_cmp;
  bool *output_cmp_data = output_cmp.mutable_data<bool>(output->dims());
  LogicalNot(x, &output_cmp);

  const bool *output_data = output->data<bool>();
  for (int i = 0; i < output->numel(); ++i) {
    if (output_data[i] != output_cmp_data[i]) {
      LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
                     << ", output_cmp_data[" << i
                     << "] = " << output_cmp_data[i];
      delete op;
      exit(1);
    }
  }
  delete op;  // fix: op leaked on the success path
  return 0;   // fix: function declared int but had no return (UB)
}
} // namespace paddle_mobile
int main() {
paddle_mobile::TestLogicalNotOp({1, 1, 2, 3});
paddle_mobile::TestLogicalNotOp({1, 3, 11, 12});
paddle_mobile::TestLogicalNotOp({1, 16, 32, 32});
DLOG << "test logical_not op pass.";
return 0;
}
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../test_include.h"
#include "operators/logical_op.h"
namespace paddle_mobile {
// Reference implementation: out[i] = x[i] || y[i] over inputX->numel()
// elements. Assumes inputY and output match inputX's element count.
void LogicalOr(const framework::Tensor *inputX, const framework::Tensor *inputY,
               framework::Tensor *output) {
  const bool *x = inputX->data<bool>();
  const bool *y = inputY->data<bool>();
  bool *out = output->data<bool>();
  for (int i = 0; i < inputX->numel(); ++i) {
    out[i] = x[i] || y[i];
  }
}
// Builds a scope with two random bool tensors of `input_shape`, runs
// LogicalOrOp, and checks the op's output against the reference
// LogicalOr(). Exits the process on mismatch; returns 0 on success.
int TestLogicalOrOp(const std::vector<int> input_shape) {
  framework::DDim input_dims = framework::make_ddim(input_shape);
  VariableNameMap inputs;
  VariableNameMap outputs;
  auto scope = std::make_shared<framework::Scope>();
  inputs["X"] = std::vector<std::string>({"inputX"});
  inputs["Y"] = std::vector<std::string>({"inputY"});
  outputs["Out"] = std::vector<std::string>({"output"});

  auto x_var = scope.get()->Var("inputX");
  auto x = x_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(x, input_dims, 0, 1);

  auto y_var = scope.get()->Var("inputY");
  auto y = y_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(y, input_dims, 0, 1);

  auto output_var = scope.get()->Var("output");
  framework::AttributeMap attrs;
  auto *op = new operators::LogicalOrOp<CPU, float>("logical_or", inputs,
                                                    outputs, attrs, scope);
  op->InferShape();
  op->Init();
  op->Run();

  auto output = output_var->template Get<framework::LoDTensor>();
  framework::Tensor output_cmp;
  bool *output_cmp_data = output_cmp.mutable_data<bool>(output->dims());
  LogicalOr(x, y, &output_cmp);

  const bool *output_data = output->data<bool>();
  for (int i = 0; i < output->numel(); ++i) {
    if (output_data[i] != output_cmp_data[i]) {
      LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
                     << ", output_cmp_data[" << i
                     << "] = " << output_cmp_data[i];
      delete op;
      exit(1);
    }
  }
  delete op;  // fix: op leaked on the success path
  return 0;   // fix: function declared int but had no return (UB)
}
} // namespace paddle_mobile
int main() {
paddle_mobile::TestLogicalOrOp({1, 1, 2, 3});
paddle_mobile::TestLogicalOrOp({1, 3, 11, 12});
paddle_mobile::TestLogicalOrOp({1, 16, 32, 32});
DLOG << "test logical_or op pass.";
return 0;
}
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../test_include.h"
#include "operators/logical_op.h"
namespace paddle_mobile {
// Reference implementation: out[i] is true when exactly one of x[i], y[i]
// is true, over inputX->numel() elements. Assumes inputY and output match
// inputX's element count.
void LogicalXor(const framework::Tensor *inputX,
                const framework::Tensor *inputY, framework::Tensor *output) {
  const bool *x = inputX->data<bool>();
  const bool *y = inputY->data<bool>();
  bool *out = output->data<bool>();
  for (int i = 0; i < inputX->numel(); ++i) {
    // (x || y) && !(x && y)  <=>  x != y for booleans
    out[i] = (x[i] || y[i]) && !(x[i] && y[i]);
  }
}
// Builds a scope with two random bool tensors of `input_shape`, runs
// LogicalXorOp, and checks the op's output against the reference
// LogicalXor(). Exits the process on mismatch; returns 0 on success.
int TestLogicalXorOp(const std::vector<int> input_shape) {
  framework::DDim input_dims = framework::make_ddim(input_shape);
  VariableNameMap inputs;
  VariableNameMap outputs;
  auto scope = std::make_shared<framework::Scope>();
  inputs["X"] = std::vector<std::string>({"inputX"});
  inputs["Y"] = std::vector<std::string>({"inputY"});
  outputs["Out"] = std::vector<std::string>({"output"});

  auto x_var = scope.get()->Var("inputX");
  auto x = x_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(x, input_dims, 0, 1);

  auto y_var = scope.get()->Var("inputY");
  auto y = y_var->template GetMutable<framework::LoDTensor>();
  SetupTensor<bool>(y, input_dims, 0, 1);

  auto output_var = scope.get()->Var("output");
  framework::AttributeMap attrs;
  auto *op = new operators::LogicalXorOp<CPU, float>("logical_xor", inputs,
                                                     outputs, attrs, scope);
  op->InferShape();
  op->Init();
  op->Run();

  auto output = output_var->template Get<framework::LoDTensor>();
  framework::Tensor output_cmp;
  bool *output_cmp_data = output_cmp.mutable_data<bool>(output->dims());
  LogicalXor(x, y, &output_cmp);

  const bool *output_data = output->data<bool>();
  for (int i = 0; i < output->numel(); ++i) {
    if (output_data[i] != output_cmp_data[i]) {
      LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
                     << ", output_cmp_data[" << i
                     << "] = " << output_cmp_data[i];
      delete op;
      exit(1);
    }
  }
  delete op;  // fix: op leaked on the success path
  return 0;   // fix: function declared int but had no return (UB)
}
} // namespace paddle_mobile
int main() {
paddle_mobile::TestLogicalXorOp({1, 1, 2, 3});
paddle_mobile::TestLogicalXorOp({1, 3, 11, 12});
paddle_mobile::TestLogicalXorOp({1, 16, 32, 32});
DLOG << "test logical_xor op pass.";
return 0;
}
......@@ -83,6 +83,26 @@ void SetupTensor(paddle_mobile::framework::Tensor *input,
}
}
// bool specialization of SetupTensor: fills `input` with the constant
// `lower` when lower == upper, otherwise with an independent fair coin flip
// per element. The static incrementing seed keeps runs deterministic while
// varying values across successive calls.
template <>
void SetupTensor<bool>(paddle_mobile::framework::Tensor *input,
                       paddle_mobile::framework::DDim dims, bool lower,
                       bool upper) {
  static unsigned int seed = 100;
  std::mt19937 rng(seed++);
  // bernoulli_distribution is the idiomatic way to draw random booleans.
  std::bernoulli_distribution coin(0.5);
  bool *input_ptr = input->mutable_data<bool>(dims);
  if (lower == upper) {
    for (int i = 0; i < input->numel(); ++i) {
      input_ptr[i] = lower;
    }
  } else {
    for (int i = 0; i < input->numel(); ++i) {
      input_ptr[i] = coin(rng);
    }
  }
}
template <typename T>
T *CreateInput(Tensor *input, DDim dims, T low, T up) {
SetupTensor<T>(input, dims, static_cast<float>(low), static_cast<float>(up));
......
......@@ -281,6 +281,10 @@ if(NOT FOUND_MATCH)
set(TANH_OP ON)
set(LOD_RESET_OP ON)
set(LESS_THAN_OP ON)
set(LOGICAL_AND_OP ON)
set(LOGICAL_OR_OP ON)
set(LOGICAL_NOT_OP ON)
set(LOGICAL_XOR_OP ON)
endif()
# option(BATCHNORM_OP "" ON)
......@@ -530,6 +534,18 @@ endif()
if (LESS_THAN_OP)
add_definitions(-DLESS_THAN_OP)
endif()
if (LOGICAL_AND_OP)
add_definitions(-DLOGICAL_AND_OP)
endif()
if (LOGICAL_OR_OP)
add_definitions(-DLOGICAL_OR_OP)
endif()
if (LOGICAL_NOT_OP)
add_definitions(-DLOGICAL_NOT_OP)
endif()
if (LOGICAL_XOR_OP)
add_definitions(-DLOGICAL_XOR_OP)
endif()
if (TANH_OP)
add_definitions(-DTANH_OP)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册