Commit 546f8a23 authored by liutuo

Fix issue 149: Google C++ coding style

Parent cf234f2a
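The hunks below are mechanical style fixes for the Google C++ Style Guide (issue 149): operator sources move from the bare `mace` namespace (or a `using namespace mace;` directive in tests) into `mace::ops` and `mace::ops::test`, header guards are renamed to match their file paths, missing standard-library includes (`<string>`, `<vector>`, `<functional>`) are added, unseeded `rand()` calls in tests become `rand_r()` with a local seed, and reference output parameters become pointers. As a condensed illustration of the namespace change, abridged from the operator-registry hunks that follow (not a complete file):

```cpp
// Before: registration functions sat directly in ::mace and were called
// unqualified from the registry constructor.
namespace mace {
extern void Register_Activation(OperatorRegistry *op_registry);

OperatorRegistry::OperatorRegistry() {
  Register_Activation(this);
}
}  // namespace mace

// After: operator code lives in mace::ops (tests in mace::ops::test) and the
// registry qualifies each call explicitly.
namespace mace {
namespace ops {
extern void Register_Activation(OperatorRegistry *op_registry);
}  // namespace ops

OperatorRegistry::OperatorRegistry() {
  ops::Register_Activation(this);
}
}  // namespace mace
```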

@@ -62,6 +62,8 @@ std::unique_ptr<OperatorBase> OperatorRegistry::CreateOperator(
}
}
+namespace ops {
extern void Register_Activation(OperatorRegistry *op_registry);
extern void Register_AddN(OperatorRegistry *op_registry);
extern void Register_BatchNorm(OperatorRegistry *op_registry);

@@ -88,32 +90,34 @@ extern void Register_Eltwise(OperatorRegistry *op_registry);
extern void Register_FullyConnected(OperatorRegistry *op_registry);
extern void Register_Slice(OperatorRegistry *op_registry);
+} // namespace ops
OperatorRegistry::OperatorRegistry() {
-Register_Activation(this);
-Register_AddN(this);
-Register_BatchNorm(this);
-Register_BatchToSpaceND(this);
-Register_BiasAdd(this);
-Register_BufferToImage(this);
-Register_ChannelShuffle(this);
-Register_Concat(this);
-Register_Conv2D(this);
-Register_DepthwiseConv2d(this);
-Register_FoldedBatchNorm(this);
-Register_FusedConv2D(this);
-Register_GlobalAvgPooling(this);
-Register_ImageToBuffer(this);
-Register_Pooling(this);
-Register_ResizeBilinear(this);
-Register_Softmax(this);
-Register_SpaceToBatchND(this);
-Register_MatMul(this);
-Register_WinogradTransform(this);
-Register_WinogradInverseTransform(this);
-Register_Reshape(this);
-Register_Eltwise(this);
-Register_FullyConnected(this);
-Register_Slice(this);
+ops::Register_Activation(this);
+ops::Register_AddN(this);
+ops::Register_BatchNorm(this);
+ops::Register_BatchToSpaceND(this);
+ops::Register_BiasAdd(this);
+ops::Register_BufferToImage(this);
+ops::Register_ChannelShuffle(this);
+ops::Register_Concat(this);
+ops::Register_Conv2D(this);
+ops::Register_DepthwiseConv2d(this);
+ops::Register_FoldedBatchNorm(this);
+ops::Register_FusedConv2D(this);
+ops::Register_GlobalAvgPooling(this);
+ops::Register_ImageToBuffer(this);
+ops::Register_Pooling(this);
+ops::Register_ResizeBilinear(this);
+ops::Register_Softmax(this);
+ops::Register_SpaceToBatchND(this);
+ops::Register_MatMul(this);
+ops::Register_WinogradTransform(this);
+ops::Register_WinogradInverseTransform(this);
+ops::Register_Reshape(this);
+ops::Register_Eltwise(this);
+ops::Register_FullyConnected(this);
+ops::Register_Slice(this);
}
} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/activation.h"
namespace mace {
+namespace ops {
void Register_Activation(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Activation")

@@ -26,4 +27,5 @@ void Register_Activation(OperatorRegistry *op_registry) {
ActivationOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -5,10 +5,13 @@
#ifndef MACE_OPS_ACTIVATION_H_
#define MACE_OPS_ACTIVATION_H_
+#include <string>
#include "mace/core/operator.h"
#include "mace/kernels/activation.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class ActivationOp : public Operator<D, T> {

@@ -36,6 +39,7 @@ class ActivationOp : public Operator<D, T> {
kernels::ActivationFunctor<D, T> functor_;
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_ACTIVATION_H_

@@ -3,11 +3,15 @@
//
#include <string>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void ReluBenchmark(
int iters, int batch, int channels, int height, int width) {

@@ -316,4 +320,6 @@ BM_SIGMOID(1, 3, 512, 512);
BM_SIGMOID(1, 32, 112, 112);
BM_SIGMOID(1, 64, 256, 256);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class ActivationOpTest : public OpsTestBase {};

@@ -365,4 +367,6 @@ TEST_F(ActivationOpTest, OPENCLSimpleSigmoid) {
TestSimpleSigmoid<DeviceType::OPENCL>();
}
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/addn.h"
namespace mace {
+namespace ops {
void Register_AddN(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("AddN")

@@ -26,4 +27,5 @@ void Register_AddN(OperatorRegistry *op_registry) {
AddNOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -11,6 +11,7 @@
#include "mace/kernels/addn.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class AddNOp : public Operator<D, T> {

@@ -40,6 +41,7 @@ class AddNOp : public Operator<D, T> {
kernels::AddNFunctor<D, T> functor_;
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_ADDN_H_

@@ -3,11 +3,15 @@
//
#include <string>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void AddNBenchmark(int iters, int inputs, int n, int h, int w, int c) {
mace::testing::StopTiming();

@@ -75,4 +79,6 @@ BM_ADDN(4, 1, 128, 128, 3);
BM_ADDN(2, 1, 256, 256, 3);
BM_ADDN(2, 1, 512, 512, 3);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class AddnOpTest : public OpsTestBase {};

@@ -66,11 +68,12 @@ void RandomTest() {
for (int round = 0; round < 10; ++round) {
// generate random input
-index_t n = 1 + (rand() % 5);
-index_t h = 1 + (rand() % 100);
-index_t w = 1 + (rand() % 100);
-index_t c = 1 + (rand() % 32);
-int input_num = 2 + rand() % 3;
+static unsigned int seed = 123;
+index_t n = 1 + (rand_r(&seed) % 5);
+index_t h = 1 + (rand_r(&seed) % 100);
+index_t w = 1 + (rand_r(&seed) % 100);
+index_t c = 1 + (rand_r(&seed) % 32);
+int input_num = 2 + rand_r(&seed) % 3;
// Construct graph
OpsTestNet net;
auto op_def = OpDefBuilder("AddN", "AddNTest");

@@ -117,4 +120,6 @@ void RandomTest() {
TEST_F(AddnOpTest, OPENCLRandom) { RandomTest<DeviceType::OPENCL>(); }
+} // namespace test
+} // namespace ops
} // namespace mace
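
A pattern that repeats through the test and benchmark hunks above and below: unseeded `rand()` calls (flagged by cpplint's runtime/threadsafe_fn check) are replaced with `rand_r()` driven by a fixed local seed, which also makes the randomly chosen tensor shapes reproducible between runs. A minimal standalone sketch of the idiom; the helper name is made up for illustration, the real code sits inline in the TEST_F bodies:

```cpp
#include <cstdlib>  // rand_r() is POSIX, declared via <stdlib.h>/<cstdlib> on Linux/Android.

// Hypothetical helper showing the seeded rand_r() idiom adopted by the tests.
void PickRandomShape(int *n, int *h, int *w, int *c) {
  // One explicit seed instead of the global state behind rand()/srand():
  // thread-safe per cpplint, and deterministic across runs.
  static unsigned int seed = 123;
  *n = 1 + (rand_r(&seed) % 5);    // batch
  *h = 1 + (rand_r(&seed) % 100);  // height
  *w = 1 + (rand_r(&seed) % 100);  // width
  *c = 1 + (rand_r(&seed) % 32);   // channels
}
```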

@@ -5,6 +5,7 @@
#include "mace/ops/batch_norm.h"
namespace mace {
+namespace ops {
void Register_BatchNorm(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("BatchNorm")

@@ -26,4 +27,5 @@ void Register_BatchNorm(OperatorRegistry *op_registry) {
BatchNormOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -10,6 +10,7 @@
#include "mace/kernels/batch_norm.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class BatchNormOp : public Operator<D, T> {

@@ -55,6 +56,7 @@ class BatchNormOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_BATCH_NORM_H_

@@ -8,6 +8,9 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void BatchNorm(
int iters, int batch, int channels, int height, int width) {

@@ -101,4 +104,6 @@ BM_BATCH_NORM(1, 1024, 7, 7);
BM_BATCH_NORM(32, 1, 256, 256);
BM_BATCH_NORM(32, 3, 256, 256);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class BatchNormOpTest : public OpsTestBase {};

@@ -75,11 +77,12 @@ TEST_F(BatchNormOpTest, SimpleCPU) { Simple<DeviceType::CPU>(); }
TEST_F(BatchNormOpTest, SimpleOPENCL) { Simple<DeviceType::OPENCL>(); }
TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 64;
index_t width = 64;

@@ -147,11 +150,12 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
}
TEST_F(BatchNormOpTest, SimpleRandomHalfOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 64;
index_t width = 64;

@@ -220,11 +224,12 @@ TEST_F(BatchNormOpTest, SimpleRandomHalfOPENCL) {
}
TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 103;
index_t width = 113;

@@ -292,11 +297,12 @@ TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
}
TEST_F(BatchNormOpTest, ComplexRandomHalfOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 103;
index_t width = 113;

@@ -363,4 +369,7 @@ TEST_F(BatchNormOpTest, ComplexRandomHalfOPENCL) {
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
}
-}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/batch_to_space.h"
namespace mace {
+namespace ops {
void Register_BatchToSpaceND(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("BatchToSpaceND")

@@ -19,4 +20,5 @@ void Register_BatchToSpaceND(OperatorRegistry *op_registry) {
BatchToSpaceNDOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -2,15 +2,17 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#ifndef MACE_OPS_SPACE_TO_BATCH_H_
-#define MACE_OPS_SPACE_TO_BATCH_H_
+#ifndef MACE_OPS_BATCH_TO_SPACE_H_
+#define MACE_OPS_BATCH_TO_SPACE_H_
#include <memory>
+#include <vector>
#include "mace/core/operator.h"
#include "mace/kernels/space_to_batch.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class BatchToSpaceNDOp : public Operator<D, T> {

@@ -68,6 +70,7 @@ class BatchToSpaceNDOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
-#endif // MACE_OPS_SPACE_TO_BATCH_H_
+#endif // MACE_OPS_BATCH_TO_SPACE_H_

@@ -7,6 +7,9 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void BMBatchToSpace(
int iters, int batch, int channels, int height, int width, int arg) {

@@ -53,4 +56,7 @@ static void BMBatchToSpace(
BM_BATCH_TO_SPACE(128, 8, 8, 128, 2);
BM_BATCH_TO_SPACE(4, 128, 128, 32, 2);
BM_BATCH_TO_SPACE(16, 64, 64, 32, 4);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/bias_add.h"
namespace mace {
+namespace ops {
void Register_BiasAdd(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("BiasAdd")

@@ -26,4 +27,5 @@ void Register_BiasAdd(OperatorRegistry *op_registry) {
BiasAddOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -2,13 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#ifndef MACE_BIAS_ADD_H_
-#define MACE_BIAS_ADD_H_
+#ifndef MACE_OPS_BIAS_ADD_H_
+#define MACE_OPS_BIAS_ADD_H_
#include "mace/core/operator.h"
#include "mace/kernels/bias_add.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class BiasAddOp : public Operator<D, T> {

@@ -40,6 +41,7 @@ class BiasAddOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
-#endif // MACE_BIAS_ADD_H_
+#endif // MACE_OPS_BIAS_ADD_H_

@@ -8,6 +8,9 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void BiasAdd(int iters, int batch, int channels, int height, int width) {
mace::testing::StopTiming();

@@ -77,4 +80,7 @@ BM_BIAS_ADD(1, 512, 14, 14);
BM_BIAS_ADD(1, 1024, 7, 7);
BM_BIAS_ADD(32, 1, 256, 256);
BM_BIAS_ADD(32, 3, 256, 256);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class BiasAddOpTest : public OpsTestBase {};

@@ -60,13 +62,14 @@ TEST_F(BiasAddOpTest, BiasAddSimpleOPENCL) {
}
TEST_F(BiasAddOpTest, SimpleRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
-index_t height = 64 + rand() % 50;
-index_t width = 64 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
+index_t height = 64 + rand_r(&seed) % 50;
+index_t width = 64 + rand_r(&seed) % 50;
// Construct graph
OpsTestNet net;

@@ -110,13 +113,14 @@ TEST_F(BiasAddOpTest, SimpleRandomOPENCL) {
}
TEST_F(BiasAddOpTest, ComplexRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
-index_t height = 103 + rand() % 100;
-index_t width = 113 + rand() % 100;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
+index_t height = 103 + rand_r(&seed) % 100;
+index_t width = 113 + rand_r(&seed) % 100;
// Construct graph
OpsTestNet net;

@@ -158,4 +162,7 @@ TEST_F(BiasAddOpTest, ComplexRandomOPENCL) {
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
}
-}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/buffer_to_image.h"
namespace mace {
+namespace ops {
void Register_BufferToImage(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("BufferToImage")

@@ -20,4 +21,5 @@ void Register_BufferToImage(OperatorRegistry *op_registry) {
BufferToImageOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -9,6 +9,7 @@
#include "mace/kernels/buffer_to_image.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class BufferToImageOp : public Operator<D, T> {

@@ -36,5 +37,6 @@ class BufferToImageOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_BUFFER_TO_IMAGE_H_

@@ -5,7 +5,9 @@
#include "gtest/gtest.h"
#include "mace/ops/ops_test_util.h"
-using namespace mace;
+namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
void TestBidirectionTransform(const int type,

@@ -188,3 +190,7 @@ TEST(BufferToImageTest, ArgStringHalfToHalfSmall) {
TestStringHalfBidirectionTransform<DeviceType::OPENCL, half>(
kernels::ARGUMENT, {2}, input_data);
}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/channel_shuffle.h"
namespace mace {
+namespace ops {
void Register_ChannelShuffle(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("ChannelShuffle")

@@ -24,4 +25,5 @@ void Register_ChannelShuffle(OperatorRegistry *op_registry) {
ChannelShuffleOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -11,6 +11,7 @@
#include "mace/kernels/channel_shuffle.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class ChannelShuffleOp : public Operator<D, T> {

@@ -42,6 +43,7 @@ class ChannelShuffleOp : public Operator<D, T> {
kernels::ChannelShuffleFunctor<D, T> functor_;
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_CHANNEL_SHUFFLE_H_

@@ -7,8 +7,10 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
-template<DeviceType D, typename T>
+template <DeviceType D, typename T>
static void ChannelShuffle(
int iters, int batch, int channels, int height, int width, int group) {
mace::testing::StopTiming();

@@ -48,7 +50,8 @@ static void ChannelShuffle(
}
#define BM_CHANNEL_SHUFFLE_MACRO(N, C, H, W, G, TYPE, DEVICE) \
-static void BM_CHANNEL_SHUFFLE_##N##_##C##_##H##_##W##_##G##_##TYPE##_##DEVICE( \
+static void \
+BM_CHANNEL_SHUFFLE_##N##_##C##_##H##_##W##_##G##_##TYPE##_##DEVICE( \
int iters) { \
const int64_t tot = static_cast<int64_t>(iters) * N * C * H * W; \
mace::testing::MaccProcessed(tot); \

@@ -66,4 +69,6 @@ BM_CHANNEL_SHUFFLE(1, 64, 64, 64, 8);
BM_CHANNEL_SHUFFLE(1, 64, 128, 128, 8);
BM_CHANNEL_SHUFFLE(1, 64, 256, 256, 8);
+} // namespace test
+} // namespace ops
} // namespace mace

//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/core/operator.h"
#include "mace/ops/ops_test_util.h"
-using namespace mace;
+namespace mace {
+namespace ops {
+namespace test {
class ChannelShuffleOpTest : public OpsTestBase {};

@@ -44,7 +47,6 @@ TEST_F(ChannelShuffleOpTest, C16G4_OPENCL) {
BufferToImage<DeviceType::OPENCL, float>(net, "Input", "InputImage",
kernels::BufferType::IN_OUT_CHANNEL);
OpDefBuilder("ChannelShuffle", "ChannelShuffleTest")
.Input("InputImage")
.Output("OutputImage")

@@ -60,8 +62,13 @@ TEST_F(ChannelShuffleOpTest, C16G4_OPENCL) {
// Check
auto expected = CreateTensor<float>(
-{1, 1, 2, 16}, {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
+{1, 1, 2, 16},
+{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/concat.h"
namespace mace {
+namespace ops {
void Register_Concat(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Concat")

@@ -25,4 +26,5 @@ void Register_Concat(OperatorRegistry *op_registry) {
ConcatOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -5,9 +5,13 @@
#ifndef MACE_OPS_CONCAT_H_
#define MACE_OPS_CONCAT_H_
+#include <vector>
#include "mace/core/operator.h"
#include "mace/kernels/concat.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class ConcatOp : public Operator<D, T> {

@@ -41,6 +45,7 @@ class ConcatOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_CONCAT_H_

@@ -7,6 +7,9 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void ConcatHelper(int iters, int concat_dim, int dim1) {
mace::testing::StopTiming();

@@ -106,4 +109,6 @@ BM_CONCAT_OPENCL_MACRO(3, 32, 32, 64, half);
BM_CONCAT_OPENCL_MACRO(3, 32, 32, 128, half);
BM_CONCAT_OPENCL_MACRO(3, 32, 32, 256, half);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -2,11 +2,16 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#include "mace/ops/concat.h"
+#include <string>
+#include <functional>
#include "gmock/gmock.h"
#include "mace/ops/ops_test_util.h"
+#include "mace/ops/concat.h"
-using namespace mace;
+namespace mace {
+namespace ops {
+namespace test {
class ConcatOpTest : public OpsTestBase {};

@@ -87,10 +92,11 @@ TEST_F(ConcatOpTest, CPUSimpleVertical) {
}
TEST_F(ConcatOpTest, CPURandom) {
-srand(time(nullptr));
+// srand(time(nullptr));
+static unsigned int seed = 123;
int dim = 5;
-int num_inputs = 2 + rand() % 10;
-int axis = rand() % dim;
+int num_inputs = 2 + rand_r(&seed) % 10;
+int axis = rand_r(&seed) % dim;
// Construct graph
OpsTestNet net;
auto builder = OpDefBuilder("Concat", "ConcatTest");

@@ -108,7 +114,7 @@ TEST_F(ConcatOpTest, CPURandom) {
std::vector<float *> input_ptrs(num_inputs, nullptr);
index_t concat_axis_size = 0;
for (int i = 0; i < num_inputs; ++i) {
-input_shapes[i][axis] = 1 + rand() % dim;
+input_shapes[i][axis] = 1 + rand_r(&seed) % dim;
concat_axis_size += input_shapes[i][axis];
GenerateRandomRealTypeData(input_shapes[i], inputs[i]);
input_ptrs[i] = inputs[i].data();

@@ -217,3 +223,7 @@ TEST_F(ConcatOpTest, OPENCLAlignedMultiInput) {
OpenclRandomTest<float>(
{{3, 32, 32, 32}, {3, 32, 32, 32}, {3, 32, 32, 32}, {3, 32, 32, 32}}, 3);
}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/conv_2d.h"
namespace mace {
+namespace ops {
void Register_Conv2D(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Conv2D")

@@ -26,4 +27,5 @@ void Register_Conv2D(OperatorRegistry *op_registry) {
Conv2dOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -12,6 +12,7 @@
#include "mace/ops/conv_pool_2d_base.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class Conv2dOp : public ConvPool2dOpBase<D, T> {

@@ -44,6 +45,7 @@ class Conv2dOp : public ConvPool2dOpBase<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_CONV_2D_H_

@@ -10,6 +10,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void Conv2d(int iters,

@@ -139,4 +141,6 @@ BM_CONV_2D(1, 32, 256, 256, 3, 3, 1, 4, VALID, 32);
BM_CONV_2D(1, 128, 56, 56, 1, 1, 1, 1, SAME, 128);
BM_CONV_2D(1, 1024, 7, 7, 1, 1, 1, 1, SAME, 1024);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -2,11 +2,15 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#include "mace/ops/conv_2d.h"
#include <fstream>
+#include <vector>
+#include "mace/ops/conv_2d.h"
#include "mace/ops/ops_test_util.h"
-using namespace mace;
+namespace mace {
+namespace ops {
+namespace test {
class Conv2dOpTest : public OpsTestBase {};

@@ -347,14 +351,15 @@ static void TestComplexConvNxNS12(const std::vector<index_t> &shape,
testing::internal::LogToStderr();
auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w,
Padding type) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 3 + (rand() % 10);
+static unsigned int seed = 123;
+index_t batch = 3 + (rand_r(&seed) % 10);
index_t height = shape[0];
index_t width = shape[1];
-index_t input_channels = shape[2] + (rand() % 10);
-index_t output_channels = shape[3] + (rand() % 10);
+index_t input_channels = shape[2] + (rand_r(&seed) % 10);
+index_t output_channels = shape[3] + (rand_r(&seed) % 10);
// Construct graph
OpsTestNet net;
OpDefBuilder("Conv2D", "Conv2dTest")

@@ -729,3 +734,7 @@ TEST_F(Conv2dOpTest, OPENCLAlignedPad2) {
TEST_F(Conv2dOpTest, OPENCLUnalignedPad4) {
TestArbitraryPadConvNxN<DeviceType::OPENCL, float>({107, 113, 5, 7}, {4, 4});
}
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,10 +5,13 @@
#ifndef MACE_OPS_CONV_POOL_2D_BASE_H_
#define MACE_OPS_CONV_POOL_2D_BASE_H_
+#include <vector>
#include "mace/core/operator.h"
#include "mace/kernels/conv_pool_2d_util.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class ConvPool2dOpBase : public Operator<D, T> {

@@ -29,6 +32,7 @@ class ConvPool2dOpBase : public Operator<D, T> {
std::vector<int> dilations_;
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_CONV_POOL_2D_BASE_H_

@@ -5,6 +5,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
TEST(CoreTest, INIT_MODE) {
std::vector<OperatorDef> op_defs;

@@ -56,4 +58,6 @@ TEST(CoreTest, INIT_MODE) {
1e-5);
}
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/depthwise_conv2d.h"
namespace mace {
+namespace ops {
void Register_DepthwiseConv2d(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("DepthwiseConv2d")

@@ -26,4 +27,5 @@ void Register_DepthwiseConv2d(OperatorRegistry *op_registry) {
DepthwiseConv2dOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -6,6 +6,7 @@
#define MACE_OPS_DEPTHWISE_CONV2D_H_
#include <memory>
+#include <string>
#include "mace/core/operator.h"
#include "mace/kernels/conv_2d.h"

@@ -13,6 +14,7 @@
#include "mace/ops/conv_pool_2d_base.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class DepthwiseConv2dOp : public ConvPool2dOpBase<D, T> {

@@ -48,6 +50,7 @@ class DepthwiseConv2dOp : public ConvPool2dOpBase<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_DEPTHWISE_CONV2D_H_

@@ -10,6 +10,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void DepthwiseConv2d(int iters,

@@ -121,4 +123,6 @@ BM_DEPTHWISE_CONV_2D(1, 64, 33, 31, 3, 3, 2, SAME, 1);
BM_DEPTHWISE_CONV_2D(1, 3, 512, 512, 3, 3, 2, VALID, 1);
BM_DEPTHWISE_CONV_2D(1, 3, 512, 512, 3, 3, 2, SAME, 1);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -5,9 +5,9 @@
#include "mace/ops/conv_2d.h"
#include "mace/ops/ops_test_util.h"
-using namespace mace;
-namespace {
+namespace mace {
+namespace ops {
+namespace test {
class DepthwiseConv2dOpTest : public OpsTestBase {};

@@ -207,11 +207,12 @@ void TestNxNS12(const index_t height, const index_t width) {
testing::internal::LogToStderr();
auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w,
Padding type) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 5;
-index_t input_channels = 3 + rand() % 16;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 5;
+index_t input_channels = 3 + rand_r(&seed) % 16;
index_t multiplier = 1;
// Construct graph
OpsTestNet net;

@@ -316,4 +317,6 @@ TEST_F(DepthwiseConv2dOpTest, OpenCLUnalignedNxNS12Half) {
TestNxNS12<DeviceType::OPENCL, half>(107, 113);
}
-} // namespace
+} // namespace test
+} // namespace ops
+} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/eltwise.h"
namespace mace {
+namespace ops {
void Register_Eltwise(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Eltwise")

@@ -26,4 +27,5 @@ void Register_Eltwise(OperatorRegistry *op_registry) {
EltwiseOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -2,13 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#ifndef MACE_OPS_RESHAPE_H_
-#define MACE_OPS_RESHAPE_H_
+#ifndef MACE_OPS_ELTWISE_H_
+#define MACE_OPS_ELTWISE_H_
#include "mace/core/operator.h"
#include "mace/kernels/eltwise.h"
namespace mace {
+namespace ops {
template <DeviceType D, typename T>
class EltwiseOp : public Operator<D, T> {

@@ -44,6 +45,7 @@ class EltwiseOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
-#endif // MACE_OPS_RESHAPE_H_
+#endif // MACE_OPS_ELTWISE_H_

@@ -2,13 +2,17 @@
// Copyright (c) 2017 XiaoMi All rights reserved.
//
-#include "mace/kernels/eltwise.h"
#include <string>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
+#include "mace/kernels/eltwise.h"
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
template <DeviceType D, typename T>
static void EltwiseBenchmark(
int iters, kernels::EltwiseType type, int n, int h, int w, int c) {

@@ -81,4 +85,6 @@ BM_ELTWISE(0, 1, 240, 240, 256);
BM_ELTWISE(1, 1, 240, 240, 256);
BM_ELTWISE(2, 1, 240, 240, 256);
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -7,6 +7,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class EltwiseOpTest : public OpsTestBase {};

@@ -170,4 +172,6 @@ TEST_F(EltwiseOpTest, OPENCLRandomHalf) {
{13, 32, 32, 64});
}
+} // namespace test
+} // namespace ops
} // namespace mace

@@ -5,6 +5,7 @@
#include "mace/ops/folded_batch_norm.h"
namespace mace {
+namespace ops {
void Register_FoldedBatchNorm(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("FoldedBatchNorm")

@@ -26,4 +27,5 @@ void Register_FoldedBatchNorm(OperatorRegistry *op_registry) {
FoldedBatchNormOp<DeviceType::OPENCL, half>);
}
+} // namespace ops
} // namespace mace

@@ -5,10 +5,13 @@
#ifndef MACE_OPS_FOLDED_BATCH_NORM_H_
#define MACE_OPS_FOLDED_BATCH_NORM_H_
+#include <string>
#include "mace/core/operator.h"
#include "mace/kernels/batch_norm.h"
namespace mace {
+namespace ops {
template <DeviceType D, class T>
class FoldedBatchNormOp : public Operator<D, T> {

@@ -48,6 +51,7 @@ class FoldedBatchNormOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT);
};
+} // namespace ops
} // namespace mace
#endif // MACE_OPS_FOLDED_BATCH_NORM_H_

@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h"
namespace mace {
+namespace ops {
+namespace test {
class FoldedBatchNormOpTest : public OpsTestBase {};

@@ -14,12 +16,12 @@ void CalculateScaleOffset(const std::vector<float> &gamma,
const std::vector<float> &mean,
const std::vector<float> &var,
const float epsilon,
-std::vector<float> &scale,
-std::vector<float> &offset) {
+std::vector<float> *scale,
+std::vector<float> *offset) {
size_t size = gamma.size();
for (int i = 0; i < size; ++i) {
-scale[i] = gamma[i] / std::sqrt(var[i] + epsilon);
-offset[i] = offset[i] - mean[i] * scale[i];
+(*scale)[i] = gamma[i] / std::sqrt(var[i] + epsilon);
+(*offset)[i] = (*offset)[i] - mean[i] * (*scale)[i];
}
}

@@ -32,7 +34,7 @@ void Simple() {
{5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
std::vector<float> scale(1);
std::vector<float> offset(1);
-CalculateScaleOffset({4.0f}, {2.0}, {10}, {11.67f}, 1e-3, scale, offset);
+CalculateScaleOffset({4.0f}, {2.0}, {10}, {11.67f}, 1e-3, &scale, &offset);
net.AddInputFromArray<D, float>("Scale", {1}, scale);
net.AddInputFromArray<D, float>("Offset", {1}, offset);

@@ -172,11 +174,12 @@ width});
*/
TEST_F(FoldedBatchNormOpTest, SimpleRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 64;
index_t width = 64;

@@ -227,11 +230,11 @@ TEST_F(FoldedBatchNormOpTest, SimpleRandomOPENCL) {
}
TEST_F(FoldedBatchNormOpTest, SimpleRandomHalfOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 64;
index_t width = 64;

@@ -283,11 +286,11 @@ TEST_F(FoldedBatchNormOpTest, SimpleRandomHalfOPENCL) {
}
TEST_F(FoldedBatchNormOpTest, ComplexRandomOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
+static unsigned int seed = 123;
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 103;
index_t width = 113;

@@ -337,11 +340,12 @@ TEST_F(FoldedBatchNormOpTest, ComplexRandomOPENCL) {
}
TEST_F(FoldedBatchNormOpTest, ComplexRandomHalfOPENCL) {
-srand(time(NULL));
+// srand(time(NULL));
// generate random input
-index_t batch = 1 + rand() % 10;
-index_t channels = 3 + rand() % 50;
+static unsigned int seed = 123;
+index_t batch = 1 + rand_r(&seed) % 10;
+index_t channels = 3 + rand_r(&seed) % 50;
index_t height = 103;
index_t width = 113;

@@ -390,4 +394,7 @@ TEST_F(FoldedBatchNormOpTest, ComplexRandomHalfOPENCL) {
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
}
-}
+} // namespace test
+} // namespace ops
+} // namespace mace
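
The `CalculateScaleOffset` hunk above also shows the other recurring fix in this commit: output parameters change from non-const references to pointers, per the Google style rule that mutation should be visible at the call site. Pulled out of the diff for clarity, with only the loop index type tidied up:

```cpp
#include <cmath>
#include <vector>

// Output parameters are pointers, so callers must write &scale / &offset.
void CalculateScaleOffset(const std::vector<float> &gamma,
                          const std::vector<float> &mean,
                          const std::vector<float> &var,
                          const float epsilon,
                          std::vector<float> *scale,
                          std::vector<float> *offset) {
  const size_t size = gamma.size();
  for (size_t i = 0; i < size; ++i) {
    // scale = gamma / sqrt(var + epsilon); offset folds the running mean in.
    (*scale)[i] = gamma[i] / std::sqrt(var[i] + epsilon);
    (*offset)[i] = (*offset)[i] - mean[i] * (*scale)[i];
  }
}

// Call site, as in the Simple() test above:
//   CalculateScaleOffset({4.0f}, {2.0}, {10}, {11.67f}, 1e-3, &scale, &offset);
```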
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/fully_connected.h" #include "mace/ops/fully_connected.h"
namespace mace { namespace mace {
namespace ops {
void Register_FullyConnected(OperatorRegistry *op_registry) { void Register_FullyConnected(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("FC") REGISTER_OPERATOR(op_registry, OpKeyBuilder("FC")
...@@ -26,4 +27,5 @@ void Register_FullyConnected(OperatorRegistry *op_registry) { ...@@ -26,4 +27,5 @@ void Register_FullyConnected(OperatorRegistry *op_registry) {
FullyConnectedOp<DeviceType::OPENCL, half>); FullyConnectedOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,10 +5,13 @@ ...@@ -5,10 +5,13 @@
#ifndef MACE_OPS_FULLY_CONNECTED_H_ #ifndef MACE_OPS_FULLY_CONNECTED_H_
#define MACE_OPS_FULLY_CONNECTED_H_ #define MACE_OPS_FULLY_CONNECTED_H_
#include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/fully_connected.h" #include "mace/kernels/fully_connected.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class FullyConnectedOp : public Operator<D, T> { class FullyConnectedOp : public Operator<D, T> {
...@@ -46,6 +49,7 @@ class FullyConnectedOp : public Operator<D, T> { ...@@ -46,6 +49,7 @@ class FullyConnectedOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_FULLY_CONNECTED_H_ #endif // MACE_OPS_FULLY_CONNECTED_H_
...@@ -3,11 +3,15 @@ ...@@ -3,11 +3,15 @@
// //
#include <string> #include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h" #include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void FCBenchmark( static void FCBenchmark(
int iters, int batch, int height, int width, int channel, int out_channel) { int iters, int batch, int height, int width, int channel, int out_channel) {
...@@ -83,4 +87,7 @@ BM_FC(1, 16, 16, 32, 32); ...@@ -83,4 +87,7 @@ BM_FC(1, 16, 16, 32, 32);
BM_FC(1, 8, 8, 32, 1000); BM_FC(1, 8, 8, 32, 1000);
BM_FC(1, 2, 2, 512, 2); BM_FC(1, 2, 2, 512, 2);
BM_FC(1, 7, 7, 512, 4096); BM_FC(1, 7, 7, 512, 4096);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -3,10 +3,13 @@ ...@@ -3,10 +3,13 @@
// //
#include <fstream> #include <fstream>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
class FullyConnectedOpTest : public OpsTestBase {}; class FullyConnectedOpTest : public OpsTestBase {};
...@@ -263,4 +266,6 @@ TEST_F(FullyConnectedOpTest, OPENCLHalfWidthFormatAligned) { ...@@ -263,4 +266,6 @@ TEST_F(FullyConnectedOpTest, OPENCLHalfWidthFormatAligned) {
TestWXFormat<half>(1, 16, 32, 32, 32); TestWXFormat<half>(1, 16, 32, 32, 32);
} }
} } // namespace test
} // namespace ops
} // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/fused_conv_2d.h" #include "mace/ops/fused_conv_2d.h"
namespace mace { namespace mace {
namespace ops {
void Register_FusedConv2D(OperatorRegistry *op_registry) { void Register_FusedConv2D(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("FusedConv2D") REGISTER_OPERATOR(op_registry, OpKeyBuilder("FusedConv2D")
...@@ -26,4 +27,5 @@ void Register_FusedConv2D(OperatorRegistry *op_registry) { ...@@ -26,4 +27,5 @@ void Register_FusedConv2D(OperatorRegistry *op_registry) {
FusedConv2dOp<DeviceType::OPENCL, half>); FusedConv2dOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -6,12 +6,14 @@ ...@@ -6,12 +6,14 @@
#define MACE_OPS_FUSED_CONV_2D_H_ #define MACE_OPS_FUSED_CONV_2D_H_
#include <memory> #include <memory>
#include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/conv_2d.h" #include "mace/kernels/conv_2d.h"
#include "mace/ops/conv_pool_2d_base.h" #include "mace/ops/conv_pool_2d_base.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class FusedConv2dOp : public ConvPool2dOpBase<D, T> { class FusedConv2dOp : public ConvPool2dOpBase<D, T> {
...@@ -46,6 +48,7 @@ class FusedConv2dOp : public ConvPool2dOpBase<D, T> { ...@@ -46,6 +48,7 @@ class FusedConv2dOp : public ConvPool2dOpBase<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_FUSED_CONV_2D_H_ #endif // MACE_OPS_FUSED_CONV_2D_H_
...@@ -2,10 +2,14 @@ ...@@ -2,10 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#include <vector>
#include "mace/ops/fused_conv_2d.h" #include "mace/ops/fused_conv_2d.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class FusedConv2dOpTest : public OpsTestBase {}; class FusedConv2dOpTest : public OpsTestBase {};
...@@ -274,14 +278,15 @@ static void TestComplexConvNxNS12(const std::vector<index_t> &shape) { ...@@ -274,14 +278,15 @@ static void TestComplexConvNxNS12(const std::vector<index_t> &shape) {
testing::internal::LogToStderr(); testing::internal::LogToStderr();
auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w, auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w,
Padding type) { Padding type) {
srand(time(NULL)); // srand(time(NULL));
// generate random input // generate random input
index_t batch = 3 + (rand() % 10); static unsigned int seed = 123;
index_t batch = 3 + (rand_r(&seed) % 10);
index_t height = shape[0]; index_t height = shape[0];
index_t width = shape[1]; index_t width = shape[1];
index_t input_channels = shape[2] + (rand() % 10); index_t input_channels = shape[2] + (rand_r(&seed) % 10);
index_t output_channels = shape[3] + (rand() % 10); index_t output_channels = shape[3] + (rand_r(&seed) % 10);
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
OpDefBuilder("FusedConv2D", "FusedConv2dTest") OpDefBuilder("FusedConv2D", "FusedConv2dTest")
...@@ -350,14 +355,15 @@ static void TestHalfComplexConvNxNS12(const std::vector<index_t> &shape) { ...@@ -350,14 +355,15 @@ static void TestHalfComplexConvNxNS12(const std::vector<index_t> &shape) {
testing::internal::LogToStderr(); testing::internal::LogToStderr();
auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w, auto func = [&](int kernel_h, int kernel_w, int stride_h, int stride_w,
Padding type) { Padding type) {
srand(time(NULL)); // srand(time(NULL));
// generate random input // generate random input
index_t batch = 3 + (rand() % 10); static unsigned int seed = 123;
index_t batch = 3 + (rand_r(&seed) % 10);
index_t height = shape[0]; index_t height = shape[0];
index_t width = shape[1]; index_t width = shape[1];
index_t input_channels = shape[2] + (rand() % 10); index_t input_channels = shape[2] + (rand_r(&seed) % 10);
index_t output_channels = shape[3] + (rand() % 10); index_t output_channels = shape[3] + (rand_r(&seed) % 10);
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
OpDefBuilder("FusedConv2D", "FusedConv2dTest") OpDefBuilder("FusedConv2D", "FusedConv2dTest")
...@@ -676,3 +682,7 @@ TEST_F(FusedConv2dOpTest, OPENCL15X15AtrousConvD4) { ...@@ -676,3 +682,7 @@ TEST_F(FusedConv2dOpTest, OPENCL15X15AtrousConvD4) {
TestGeneralHalfAtrousConv<DeviceType::OPENCL>({63, 71}, {15, 15, 16, 16}, TestGeneralHalfAtrousConv<DeviceType::OPENCL>({63, 71}, {15, 15, 16, 16},
{2, 2}); {2, 2});
} }
} // namespace test
} // namespace ops
} // namespace mace
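For context on the rand()/srand() replacement in the hunk above, here is a minimal standalone sketch (not taken from the repository) of the rand_r pattern these tests switch to; rand_r is POSIX, and the fixed seed and base channel count are illustrative values only:

#include <stdio.h>
#include <stdlib.h>  // rand_r (POSIX)

int main() {
  // A function-local seed makes the generated shapes reproducible and
  // thread-safe, unlike the global state behind srand(time(NULL)) / rand().
  static unsigned int seed = 123;
  int batch = 3 + (rand_r(&seed) % 10);            // same range as the test code
  int input_channels = 32 + (rand_r(&seed) % 10);  // hypothetical base channel count
  printf("batch=%d input_channels=%d\n", batch, input_channels);
  return 0;
}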
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/global_avg_pooling.h" #include "mace/ops/global_avg_pooling.h"
namespace mace { namespace mace {
namespace ops {
void Register_GlobalAvgPooling(OperatorRegistry *op_registry) { void Register_GlobalAvgPooling(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("GlobalAvgPooling") REGISTER_OPERATOR(op_registry, OpKeyBuilder("GlobalAvgPooling")
...@@ -14,4 +15,5 @@ void Register_GlobalAvgPooling(OperatorRegistry *op_registry) { ...@@ -14,4 +15,5 @@ void Register_GlobalAvgPooling(OperatorRegistry *op_registry) {
GlobalAvgPoolingOp<DeviceType::CPU, float>); GlobalAvgPoolingOp<DeviceType::CPU, float>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,10 +5,13 @@ ...@@ -5,10 +5,13 @@
#ifndef MACE_OPS_GLOBAL_AVG_POOLING_H_ #ifndef MACE_OPS_GLOBAL_AVG_POOLING_H_
#define MACE_OPS_GLOBAL_AVG_POOLING_H_ #define MACE_OPS_GLOBAL_AVG_POOLING_H_
#include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/global_avg_pooling.h" #include "mace/kernels/global_avg_pooling.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class GlobalAvgPoolingOp : public Operator<D, T> { class GlobalAvgPoolingOp : public Operator<D, T> {
...@@ -38,6 +41,7 @@ class GlobalAvgPoolingOp : public Operator<D, T> { ...@@ -38,6 +41,7 @@ class GlobalAvgPoolingOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_GLOBAL_AVG_POOLING_H_ #endif // MACE_OPS_GLOBAL_AVG_POOLING_H_
...@@ -7,8 +7,9 @@ ...@@ -7,8 +7,9 @@
#include "mace/core/testing/test_benchmark.h" #include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
using namespace mace::kernels; namespace ops {
namespace test {
template <DeviceType D> template <DeviceType D>
static void GlobalAvgPooling( static void GlobalAvgPooling(
...@@ -53,3 +54,7 @@ static void GlobalAvgPooling( ...@@ -53,3 +54,7 @@ static void GlobalAvgPooling(
BM_GLOBAL_AVG_POOLING(1, 3, 7, 7); BM_GLOBAL_AVG_POOLING(1, 3, 7, 7);
BM_GLOBAL_AVG_POOLING(1, 3, 64, 64); BM_GLOBAL_AVG_POOLING(1, 3, 64, 64);
BM_GLOBAL_AVG_POOLING(1, 3, 256, 256); BM_GLOBAL_AVG_POOLING(1, 3, 256, 256);
} // namespace test
} // namespace ops
} // namespace mace
...@@ -4,7 +4,9 @@ ...@@ -4,7 +4,9 @@
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class GlobalAvgPoolingOpTest : public OpsTestBase {}; class GlobalAvgPoolingOpTest : public OpsTestBase {};
...@@ -31,3 +33,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) { ...@@ -31,3 +33,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001); ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
} }
} // namespace test
} // namespace ops
} // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/image_to_buffer.h" #include "mace/ops/image_to_buffer.h"
namespace mace { namespace mace {
namespace ops {
void Register_ImageToBuffer(OperatorRegistry *op_registry) { void Register_ImageToBuffer(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("ImageToBuffer") REGISTER_OPERATOR(op_registry, OpKeyBuilder("ImageToBuffer")
...@@ -20,4 +21,5 @@ void Register_ImageToBuffer(OperatorRegistry *op_registry) { ...@@ -20,4 +21,5 @@ void Register_ImageToBuffer(OperatorRegistry *op_registry) {
ImageToBufferOp<DeviceType::OPENCL, half>); ImageToBufferOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "mace/kernels/buffer_to_image.h" #include "mace/kernels/buffer_to_image.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class ImageToBufferOp : public Operator<D, T> { class ImageToBufferOp : public Operator<D, T> {
...@@ -35,5 +36,7 @@ class ImageToBufferOp : public Operator<D, T> { ...@@ -35,5 +36,7 @@ class ImageToBufferOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_IMAGE_TO_BUFFER_H_ #endif // MACE_OPS_IMAGE_TO_BUFFER_H_
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/matmul.h" #include "mace/ops/matmul.h"
namespace mace { namespace mace {
namespace ops {
void Register_MatMul(OperatorRegistry *op_registry) { void Register_MatMul(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("MatMul") REGISTER_OPERATOR(op_registry, OpKeyBuilder("MatMul")
...@@ -26,4 +27,5 @@ void Register_MatMul(OperatorRegistry *op_registry) { ...@@ -26,4 +27,5 @@ void Register_MatMul(OperatorRegistry *op_registry) {
MatMulOp<DeviceType::OPENCL, half>); MatMulOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "mace/kernels/matmul.h" #include "mace/kernels/matmul.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class MatMulOp : public Operator<D, T> { class MatMulOp : public Operator<D, T> {
...@@ -35,6 +36,7 @@ class MatMulOp : public Operator<D, T> { ...@@ -35,6 +36,7 @@ class MatMulOp : public Operator<D, T> {
kernels::MatMulFunctor<D, T> functor_; kernels::MatMulFunctor<D, T> functor_;
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_MATMUL_H_ #endif // MACE_OPS_MATMUL_H_
...@@ -3,11 +3,15 @@ ...@@ -3,11 +3,15 @@
// //
#include <string> #include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h" #include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void MatMulBenchmark( static void MatMulBenchmark(
int iters, int batch, int height, int channels, int out_width) { int iters, int batch, int height, int channels, int out_width) {
...@@ -71,4 +75,7 @@ BM_MATMUL(16, 32, 128, 3969); ...@@ -71,4 +75,7 @@ BM_MATMUL(16, 32, 128, 3969);
BM_MATMUL(16, 128, 128, 49); BM_MATMUL(16, 128, 128, 49);
BM_MATMUL(16, 128, 128, 961); BM_MATMUL(16, 128, 128, 961);
BM_MATMUL(16, 128, 128, 3969); BM_MATMUL(16, 128, 128, 3969);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -3,10 +3,13 @@ ...@@ -3,10 +3,13 @@
// //
#include <fstream> #include <fstream>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
class MatMulOpTest : public OpsTestBase {}; class MatMulOpTest : public OpsTestBase {};
...@@ -170,4 +173,7 @@ TEST_F(MatMulOpTest, OPENCLHalfUnAlignedWithBatch) { ...@@ -170,4 +173,7 @@ TEST_F(MatMulOpTest, OPENCLHalfUnAlignedWithBatch) {
Complex<half>(16, 32, 64, 64); Complex<half>(16, 32, 64, 64);
Complex<half>(31, 31, 61, 67); Complex<half>(31, 31, 61, 67);
} }
}
} // namespace test
} // namespace ops
} // namespace mace
...@@ -2,10 +2,14 @@ ...@@ -2,10 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#ifndef MACE_OPS_TEST_UTIL_H_ #ifndef MACE_OPS_OPS_TEST_UTIL_H_
#define MACE_OPS_TEST_UTIL_H_ #define MACE_OPS_OPS_TEST_UTIL_H_
#include <type_traits> #include <type_traits>
#include <limits>
#include <functional>
#include <vector>
#include <string>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "mace/core/net.h" #include "mace/core/net.h"
...@@ -16,6 +20,8 @@ ...@@ -16,6 +20,8 @@
#include "mace/utils/utils.h" #include "mace/utils/utils.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
class OpDefBuilder { class OpDefBuilder {
public: public:
...@@ -95,7 +101,7 @@ class OpDefBuilder { ...@@ -95,7 +101,7 @@ class OpDefBuilder {
class OpsTestNet { class OpsTestNet {
public: public:
OpsTestNet() : op_registry_(new OperatorRegistry()){}; OpsTestNet() : op_registry_(new OperatorRegistry()) {}
template <DeviceType D, typename T> template <DeviceType D, typename T>
void AddInputFromArray(const std::string &name, void AddInputFromArray(const std::string &name,
...@@ -412,6 +418,8 @@ void ImageToBuffer(OpsTestNet &net, ...@@ -412,6 +418,8 @@ void ImageToBuffer(OpsTestNet &net,
net.Sync(); net.Sync();
} }
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_TEST_UTIL_H_ #endif // MACE_OPS_OPS_TEST_UTIL_H_
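A hypothetical header, mace/ops/example.h (not part of this commit), sketching the two header conventions enforced above: the include guard spells out the full path, and each namespace is closed with a matching comment:

#ifndef MACE_OPS_EXAMPLE_H_
#define MACE_OPS_EXAMPLE_H_

#include <vector>

namespace mace {
namespace ops {

// ... declarations would go here ...

} // namespace ops
} // namespace mace

#endif // MACE_OPS_EXAMPLE_H_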
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/pooling.h" #include "mace/ops/pooling.h"
namespace mace { namespace mace {
namespace ops {
void Register_Pooling(OperatorRegistry *op_registry) { void Register_Pooling(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Pooling") REGISTER_OPERATOR(op_registry, OpKeyBuilder("Pooling")
...@@ -30,4 +31,5 @@ void Register_Pooling(OperatorRegistry *op_registry) { ...@@ -30,4 +31,5 @@ void Register_Pooling(OperatorRegistry *op_registry) {
PoolingOp<DeviceType::OPENCL, half>); PoolingOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,11 +5,14 @@ ...@@ -5,11 +5,14 @@
#ifndef MACE_OPS_POOLING_H_ #ifndef MACE_OPS_POOLING_H_
#define MACE_OPS_POOLING_H_ #define MACE_OPS_POOLING_H_
#include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/pooling.h" #include "mace/kernels/pooling.h"
#include "mace/ops/conv_pool_2d_base.h" #include "mace/ops/conv_pool_2d_base.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class PoolingOp : public ConvPool2dOpBase<D, T> { class PoolingOp : public ConvPool2dOpBase<D, T> {
...@@ -25,7 +28,7 @@ class PoolingOp : public ConvPool2dOpBase<D, T> { ...@@ -25,7 +28,7 @@ class PoolingOp : public ConvPool2dOpBase<D, T> {
this->strides_.data(), this->strides_.data(),
this->padding_type_, this->padding_type_,
this->paddings_, this->paddings_,
this->dilations_.data()){}; this->dilations_.data()) {}
bool Run(StatsFuture *future) override { bool Run(StatsFuture *future) override {
const Tensor *input = this->Input(INPUT); const Tensor *input = this->Input(INPUT);
...@@ -44,6 +47,7 @@ class PoolingOp : public ConvPool2dOpBase<D, T> { ...@@ -44,6 +47,7 @@ class PoolingOp : public ConvPool2dOpBase<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_POOLING_H_ #endif // MACE_OPS_POOLING_H_
...@@ -8,8 +8,9 @@ ...@@ -8,8 +8,9 @@
#include "mace/kernels/conv_pool_2d_util.h" #include "mace/kernels/conv_pool_2d_util.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
using namespace mace::kernels; namespace ops {
namespace test {
template <DeviceType D> template <DeviceType D>
static void Pooling(int iters, static void Pooling(int iters,
...@@ -70,3 +71,7 @@ BM_POOLING(1, 3, 129, 129, 2, 2, SAME, MAX); ...@@ -70,3 +71,7 @@ BM_POOLING(1, 3, 129, 129, 2, 2, SAME, MAX);
BM_POOLING(1, 3, 257, 257, 2, 2, SAME, MAX); BM_POOLING(1, 3, 257, 257, 2, 2, SAME, MAX);
BM_POOLING(1, 3, 513, 513, 2, 2, SAME, MAX); BM_POOLING(1, 3, 513, 513, 2, 2, SAME, MAX);
BM_POOLING(1, 3, 1025, 1025, 2, 2, SAME, MAX); BM_POOLING(1, 3, 1025, 1025, 2, 2, SAME, MAX);
} // namespace test
} // namespace ops
} // namespace mace
...@@ -9,7 +9,9 @@ ...@@ -9,7 +9,9 @@
#include "mace/ops/conv_pool_2d_base.h" #include "mace/ops/conv_pool_2d_base.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class PoolingOpTest : public OpsTestBase {}; class PoolingOpTest : public OpsTestBase {};
...@@ -393,3 +395,7 @@ TEST_F(PoolingOpTest, OPENCLUnAlignedLargeKernelAvgPooling) { ...@@ -393,3 +395,7 @@ TEST_F(PoolingOpTest, OPENCLUnAlignedLargeKernelAvgPooling) {
AvgPoolingTest<OPENCL, float>({3, 31, 37, 128}, {8, 8}, {8, 8}, AvgPoolingTest<OPENCL, float>({3, 31, 37, 128}, {8, 8}, {8, 8},
Padding::SAME); Padding::SAME);
} }
} // namespace test
} // namespace ops
} // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/reshape.h" #include "mace/ops/reshape.h"
namespace mace { namespace mace {
namespace ops {
void Register_Reshape(OperatorRegistry *op_registry) { void Register_Reshape(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Reshape") REGISTER_OPERATOR(op_registry, OpKeyBuilder("Reshape")
...@@ -14,4 +15,5 @@ void Register_Reshape(OperatorRegistry *op_registry) { ...@@ -14,4 +15,5 @@ void Register_Reshape(OperatorRegistry *op_registry) {
ReshapeOp<DeviceType::CPU, float>); ReshapeOp<DeviceType::CPU, float>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,10 +5,13 @@ ...@@ -5,10 +5,13 @@
#ifndef MACE_OPS_RESHAPE_H_ #ifndef MACE_OPS_RESHAPE_H_
#define MACE_OPS_RESHAPE_H_ #define MACE_OPS_RESHAPE_H_
#include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/reshape.h" #include "mace/kernels/reshape.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class ReshapeOp : public Operator<D, T> { class ReshapeOp : public Operator<D, T> {
...@@ -61,6 +64,7 @@ class ReshapeOp : public Operator<D, T> { ...@@ -61,6 +64,7 @@ class ReshapeOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_RESHAPE_H_ #endif // MACE_OPS_RESHAPE_H_
...@@ -6,7 +6,9 @@ ...@@ -6,7 +6,9 @@
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class ReshapeTest : public OpsTestBase {}; class ReshapeTest : public OpsTestBase {};
...@@ -53,3 +55,7 @@ TEST_F(ReshapeTest, Complex) { ...@@ -53,3 +55,7 @@ TEST_F(ReshapeTest, Complex) {
TestReshape({1, 2, 3, 4}, {-1, 1}, {24, 1}); TestReshape({1, 2, 3, 4}, {-1, 1}, {24, 1});
TestReshape({1, 2, 3, 4}, {1, 3, 8}, {1, 3, 8}); TestReshape({1, 2, 3, 4}, {1, 3, 8}, {1, 3, 8});
} }
} // namespace test
} // namespace ops
} // namespace mace
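As a rough sketch of the *_test.cc pattern applied throughout this commit (the file and test names here are hypothetical), test bodies move from a global "using namespace mace;" into explicitly opened mace::ops::test namespaces:

#include "gtest/gtest.h"

namespace mace {
namespace ops {
namespace test {

// Placeholder test, not from the diff: unqualified mace/ops names resolve
// here without polluting the global namespace.
TEST(ExampleStyleTest, Trivial) {
  EXPECT_EQ(1 + 1, 2);
}

} // namespace test
} // namespace ops
} // namespace mace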
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/resize_bilinear.h" #include "mace/ops/resize_bilinear.h"
namespace mace { namespace mace {
namespace ops {
void Register_ResizeBilinear(OperatorRegistry *op_registry) { void Register_ResizeBilinear(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("ResizeBilinear") REGISTER_OPERATOR(op_registry, OpKeyBuilder("ResizeBilinear")
...@@ -26,4 +27,5 @@ void Register_ResizeBilinear(OperatorRegistry *op_registry) { ...@@ -26,4 +27,5 @@ void Register_ResizeBilinear(OperatorRegistry *op_registry) {
ResizeBilinearOp<DeviceType::OPENCL, half>); ResizeBilinearOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -2,13 +2,14 @@ ...@@ -2,13 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#ifndef MACE_RESIZE_BILINEAR_H #ifndef MACE_OPS_RESIZE_BILINEAR_H_
#define MACE_RESIZE_BILINEAR_H #define MACE_OPS_RESIZE_BILINEAR_H_
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/resize_bilinear.h" #include "mace/kernels/resize_bilinear.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class ResizeBilinearOp : public Operator<D, T> { class ResizeBilinearOp : public Operator<D, T> {
...@@ -34,6 +35,7 @@ class ResizeBilinearOp : public Operator<D, T> { ...@@ -34,6 +35,7 @@ class ResizeBilinearOp : public Operator<D, T> {
kernels::ResizeBilinearFunctor<D, T> functor_; kernels::ResizeBilinearFunctor<D, T> functor_;
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_RESIZE_BILINEAR_H #endif // MACE_OPS_RESIZE_BILINEAR_H_
...@@ -8,6 +8,9 @@ ...@@ -8,6 +8,9 @@
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void ResizeBilinearBenchmark(int iters, static void ResizeBilinearBenchmark(int iters,
int batch, int batch,
...@@ -84,4 +87,6 @@ BM_RESIZE_BILINEAR(1, 128, 240, 240, 480, 480); ...@@ -84,4 +87,6 @@ BM_RESIZE_BILINEAR(1, 128, 240, 240, 480, 480);
BM_RESIZE_BILINEAR(1, 3, 4032, 3016, 480, 480); BM_RESIZE_BILINEAR(1, 3, 4032, 3016, 480, 480);
BM_RESIZE_BILINEAR(1, 3, 480, 480, 4032, 3016); BM_RESIZE_BILINEAR(1, 3, 480, 480, 4032, 3016);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -2,11 +2,15 @@ ...@@ -2,11 +2,15 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#include "mace/ops/resize_bilinear.h" #include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/ops/resize_bilinear.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class ResizeBilinearTest : public OpsTestBase {}; class ResizeBilinearTest : public OpsTestBase {};
...@@ -61,17 +65,17 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) { ...@@ -61,17 +65,17 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
template <DeviceType D> template <DeviceType D>
void TestRandomResizeBilinear() { void TestRandomResizeBilinear() {
srand(time(nullptr)); // srand(time(nullptr));
testing::internal::LogToStderr(); testing::internal::LogToStderr();
static unsigned int seed = 123;
for (int round = 0; round < 10; ++round) { for (int round = 0; round < 10; ++round) {
int batch = 1 + rand() % 5; int batch = 1 + rand_r(&seed) % 5;
int channels = 1 + rand() % 100; int channels = 1 + rand_r(&seed) % 100;
int height = 1 + rand() % 100; int height = 1 + rand_r(&seed) % 100;
int width = 1 + rand() % 100; int width = 1 + rand_r(&seed) % 100;
int in_height = 1 + rand() % 100; int in_height = 1 + rand_r(&seed) % 100;
int in_width = 1 + rand() % 100; int in_width = 1 + rand_r(&seed) % 100;
int align_corners = rand() % 1; int align_corners = rand_r(&seed) % 1;
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
...@@ -106,7 +110,7 @@ void TestRandomResizeBilinear() { ...@@ -106,7 +110,7 @@ void TestRandomResizeBilinear() {
ImageToBuffer<D, float>(net, "OutputImage", "DeviceOutput", ImageToBuffer<D, float>(net, "OutputImage", "DeviceOutput",
kernels::BufferType::IN_OUT_CHANNEL); kernels::BufferType::IN_OUT_CHANNEL);
} else { } else {
// TODO support NEON // TODO(someone): support NEON
} }
// Check // Check
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"), 0.001); ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"), 0.001);
...@@ -122,3 +126,7 @@ TEST_F(ResizeBilinearTest, NEONRandomResizeBilinear) { ...@@ -122,3 +126,7 @@ TEST_F(ResizeBilinearTest, NEONRandomResizeBilinear) {
TEST_F(ResizeBilinearTest, OPENCLRandomResizeBilinear) { TEST_F(ResizeBilinearTest, OPENCLRandomResizeBilinear) {
TestRandomResizeBilinear<DeviceType::OPENCL>(); TestRandomResizeBilinear<DeviceType::OPENCL>();
} }
} // namespace test
} // namespace ops
} // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/slice.h" #include "mace/ops/slice.h"
namespace mace { namespace mace {
namespace ops {
void Register_Slice(OperatorRegistry *op_registry) { void Register_Slice(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Slice") REGISTER_OPERATOR(op_registry, OpKeyBuilder("Slice")
...@@ -25,4 +26,5 @@ void Register_Slice(OperatorRegistry *op_registry) { ...@@ -25,4 +26,5 @@ void Register_Slice(OperatorRegistry *op_registry) {
SliceOp<DeviceType::OPENCL, half>); SliceOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,9 +5,13 @@ ...@@ -5,9 +5,13 @@
#ifndef MACE_OPS_SLICE_H_ #ifndef MACE_OPS_SLICE_H_
#define MACE_OPS_SLICE_H_ #define MACE_OPS_SLICE_H_
#include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/slice.h" #include "mace/kernels/slice.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class SliceOp : public Operator<D, T> { class SliceOp : public Operator<D, T> {
...@@ -32,6 +36,7 @@ class SliceOp : public Operator<D, T> { ...@@ -32,6 +36,7 @@ class SliceOp : public Operator<D, T> {
OP_INPUT_TAGS(INPUT); OP_INPUT_TAGS(INPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_SLICE_H_ #endif // MACE_OPS_SLICE_H_
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template<DeviceType D, typename T> template<DeviceType D, typename T>
static void BMSliceHelper(int iters, static void BMSliceHelper(int iters,
const std::vector<index_t> &input_shape, const std::vector<index_t> &input_shape,
...@@ -75,5 +78,6 @@ BM_SLICE(1, 32, 32, 256, 2); ...@@ -75,5 +78,6 @@ BM_SLICE(1, 32, 32, 256, 2);
BM_SLICE(1, 128, 128, 32, 2); BM_SLICE(1, 128, 128, 32, 2);
BM_SLICE(1, 128, 128, 128, 2); BM_SLICE(1, 128, 128, 128, 2);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -2,22 +2,27 @@ ...@@ -2,22 +2,27 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#include <functional>
#include "gmock/gmock.h"
#include "mace/ops/slice.h" #include "mace/ops/slice.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
#include "gmock/gmock.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
class SliceOpTest : public OpsTestBase {}; class SliceOpTest : public OpsTestBase {};
template<DeviceType D, typename T> template<DeviceType D, typename T>
void RandomTest(const int num_outputs) { void RandomTest(const int num_outputs) {
srand(time(nullptr)); // srand(time(nullptr));
const index_t output_channels = 4 * (1 + rand() % 10); static unsigned int seed = 123;
const index_t output_channels = 4 * (1 + rand_r(&seed) % 10);
const index_t input_channels = num_outputs * output_channels; const index_t input_channels = num_outputs * output_channels;
const index_t batch = 3 + (rand() % 10); const index_t batch = 3 + (rand_r(&seed) % 10);
const index_t height = 13 + (rand() % 10); const index_t height = 13 + (rand_r(&seed) % 10);
const index_t width = 17 + (rand() % 10); const index_t width = 17 + (rand_r(&seed) % 10);
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
...@@ -47,7 +52,6 @@ void RandomTest(const int num_outputs) { ...@@ -47,7 +52,6 @@ void RandomTest(const int num_outputs) {
builder = builder.Output(MakeString("Output", i)); builder = builder.Output(MakeString("Output", i));
} }
builder.Finalize(net.NewOperatorDef()); builder.Finalize(net.NewOperatorDef());
} }
// Run // Run
...@@ -97,3 +101,7 @@ TEST_F(SliceOpTest, OPENCLHalf) { ...@@ -97,3 +101,7 @@ TEST_F(SliceOpTest, OPENCLHalf) {
RandomTest<DeviceType::OPENCL, half>(4); RandomTest<DeviceType::OPENCL, half>(4);
RandomTest<DeviceType::OPENCL, half>(11); RandomTest<DeviceType::OPENCL, half>(11);
} }
} // namespace test
} // namespace ops
} // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/softmax.h" #include "mace/ops/softmax.h"
namespace mace { namespace mace {
namespace ops {
void Register_Softmax(OperatorRegistry *op_registry) { void Register_Softmax(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("Softmax") REGISTER_OPERATOR(op_registry, OpKeyBuilder("Softmax")
...@@ -26,4 +27,5 @@ void Register_Softmax(OperatorRegistry *op_registry) { ...@@ -26,4 +27,5 @@ void Register_Softmax(OperatorRegistry *op_registry) {
SoftmaxOp<DeviceType::OPENCL, half>); SoftmaxOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -2,13 +2,14 @@ ...@@ -2,13 +2,14 @@
// Copyright (c) 2017 XiaoMi All rights reserved. // Copyright (c) 2017 XiaoMi All rights reserved.
// //
#ifndef MACE_SOFTMAX_H_ #ifndef MACE_OPS_SOFTMAX_H_
#define MACE_SOFTMAX_H_ #define MACE_OPS_SOFTMAX_H_
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/softmax.h" #include "mace/kernels/softmax.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, class T> template <DeviceType D, class T>
class SoftmaxOp : public Operator<D, T> { class SoftmaxOp : public Operator<D, T> {
...@@ -34,6 +35,7 @@ class SoftmaxOp : public Operator<D, T> { ...@@ -34,6 +35,7 @@ class SoftmaxOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_SOFTMAX_H_ #endif // MACE_OPS_SOFTMAX_H_
...@@ -3,11 +3,15 @@ ...@@ -3,11 +3,15 @@
// //
#include <string> #include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h" #include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void SoftmaxBenchmark( static void SoftmaxBenchmark(
int iters, int batch, int channels, int height, int width) { int iters, int batch, int channels, int height, int width) {
...@@ -66,4 +70,7 @@ BM_SOFTMAX(1, 3, 512, 512); ...@@ -66,4 +70,7 @@ BM_SOFTMAX(1, 3, 512, 512);
BM_SOFTMAX(1, 4, 512, 512); BM_SOFTMAX(1, 4, 512, 512);
BM_SOFTMAX(1, 10, 256, 256); BM_SOFTMAX(1, 10, 256, 256);
BM_SOFTMAX(1, 1024, 7, 7); BM_SOFTMAX(1, 1024, 7, 7);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
class SoftmaxOpTest : public OpsTestBase {}; class SoftmaxOpTest : public OpsTestBase {};
...@@ -102,4 +104,6 @@ TEST_F(SoftmaxOpTest, OPENCLUnAligned) { ...@@ -102,4 +104,6 @@ TEST_F(SoftmaxOpTest, OPENCLUnAligned) {
Complex<DeviceType::OPENCL>({5, 211, 107, 1}); Complex<DeviceType::OPENCL>({5, 211, 107, 1});
} }
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/space_to_batch.h" #include "mace/ops/space_to_batch.h"
namespace mace { namespace mace {
namespace ops {
void Register_SpaceToBatchND(OperatorRegistry *op_registry) { void Register_SpaceToBatchND(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("SpaceToBatchND") REGISTER_OPERATOR(op_registry, OpKeyBuilder("SpaceToBatchND")
...@@ -19,4 +20,5 @@ void Register_SpaceToBatchND(OperatorRegistry *op_registry) { ...@@ -19,4 +20,5 @@ void Register_SpaceToBatchND(OperatorRegistry *op_registry) {
SpaceToBatchNDOp<DeviceType::OPENCL, half>); SpaceToBatchNDOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -6,11 +6,13 @@ ...@@ -6,11 +6,13 @@
#define MACE_OPS_SPACE_TO_BATCH_H_ #define MACE_OPS_SPACE_TO_BATCH_H_
#include <memory> #include <memory>
#include <vector>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/space_to_batch.h" #include "mace/kernels/space_to_batch.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class SpaceToBatchNDOp : public Operator<D, T> { class SpaceToBatchNDOp : public Operator<D, T> {
...@@ -71,6 +73,7 @@ class SpaceToBatchNDOp : public Operator<D, T> { ...@@ -71,6 +73,7 @@ class SpaceToBatchNDOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_SPACE_TO_BATCH_H_ #endif // MACE_OPS_SPACE_TO_BATCH_H_
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void BMSpaceToBatch( static void BMSpaceToBatch(
int iters, int batch, int height, int width, int channels, int shape) { int iters, int batch, int height, int width, int channels, int shape) {
...@@ -55,4 +58,7 @@ static void BMSpaceToBatch( ...@@ -55,4 +58,7 @@ static void BMSpaceToBatch(
BM_SPACE_TO_BATCH(128, 16, 16, 128, 2); BM_SPACE_TO_BATCH(128, 16, 16, 128, 2);
BM_SPACE_TO_BATCH(1, 256, 256, 32, 2); BM_SPACE_TO_BATCH(1, 256, 256, 32, 2);
BM_SPACE_TO_BATCH(1, 256, 256, 32, 4); BM_SPACE_TO_BATCH(1, 256, 256, 32, 4);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace
...@@ -3,10 +3,13 @@ ...@@ -3,10 +3,13 @@
// //
#include <fstream> #include <fstream>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
using namespace mace; namespace mace {
namespace ops {
namespace test {
template <DeviceType D> template <DeviceType D>
void RunSpaceToBatch(const std::vector<index_t> &input_shape, void RunSpaceToBatch(const std::vector<index_t> &input_shape,
...@@ -216,3 +219,7 @@ TEST(SpaceToBatchTest, MultiBatchAndChannelData) { ...@@ -216,3 +219,7 @@ TEST(SpaceToBatchTest, MultiBatchAndChannelData) {
// {2, 2, 2, 2}, // {2, 2, 2, 2},
// space_tensor.get()); // space_tensor.get());
//} //}
} // namespace test
} // namespace ops
} // namespace mace
...@@ -3,11 +3,14 @@ ...@@ -3,11 +3,14 @@
// //
#include <fstream> #include <fstream>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/conv_pool_2d_util.h" #include "mace/kernels/conv_pool_2d_util.h"
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
class WinogradConvlutionTest : public OpsTestBase {}; class WinogradConvlutionTest : public OpsTestBase {};
...@@ -39,7 +42,7 @@ void WinogradConvolution(const index_t batch, ...@@ -39,7 +42,7 @@ void WinogradConvolution(const index_t batch,
const index_t in_channels, const index_t in_channels,
const index_t out_channels, const index_t out_channels,
const Padding padding) { const Padding padding) {
srand(time(NULL)); // srand(time(NULL));
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
...@@ -156,7 +159,7 @@ void WinogradConvolutionWithPad(const index_t batch, ...@@ -156,7 +159,7 @@ void WinogradConvolutionWithPad(const index_t batch,
const index_t in_channels, const index_t in_channels,
const index_t out_channels, const index_t out_channels,
const int padding) { const int padding) {
srand(time(NULL)); // srand(time(NULL));
// Construct graph // Construct graph
OpsTestNet net; OpsTestNet net;
...@@ -245,9 +248,6 @@ void WinogradConvolutionWithPad(const index_t batch, ...@@ -245,9 +248,6 @@ void WinogradConvolutionWithPad(const index_t batch,
} }
} }
TEST_F(WinogradConvlutionTest, UnAlignedConvolutionPad2) { } // namespace test
WinogradConvolutionWithPad<DeviceType::OPENCL, float>(1, 64, 64, 40, 19, 2); } // namespace ops
WinogradConvolutionWithPad<DeviceType::OPENCL, float>(1, 32, 32, 96, 109, 2); } // namespace mace
}
}
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/winograd_inverse_transform.h" #include "mace/ops/winograd_inverse_transform.h"
namespace mace { namespace mace {
namespace ops {
void Register_WinogradInverseTransform(OperatorRegistry *op_registry) { void Register_WinogradInverseTransform(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("WinogradInverseTransform") REGISTER_OPERATOR(op_registry, OpKeyBuilder("WinogradInverseTransform")
...@@ -19,4 +20,5 @@ void Register_WinogradInverseTransform(OperatorRegistry *op_registry) { ...@@ -19,4 +20,5 @@ void Register_WinogradInverseTransform(OperatorRegistry *op_registry) {
WinogradInverseTransformOp<DeviceType::OPENCL, half>); WinogradInverseTransformOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -6,12 +6,14 @@ ...@@ -6,12 +6,14 @@
#define MACE_OPS_WINOGRAD_INVERSE_TRANSFORM_H_ #define MACE_OPS_WINOGRAD_INVERSE_TRANSFORM_H_
#include <memory> #include <memory>
#include <string>
#include "mace/core/operator.h" #include "mace/core/operator.h"
#include "mace/kernels/activation.h" #include "mace/kernels/activation.h"
#include "mace/kernels/winograd_transform.h" #include "mace/kernels/winograd_transform.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class WinogradInverseTransformOp : public Operator<D, T> { class WinogradInverseTransformOp : public Operator<D, T> {
...@@ -42,6 +44,7 @@ class WinogradInverseTransformOp : public Operator<D, T> { ...@@ -42,6 +44,7 @@ class WinogradInverseTransformOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_WINOGRAD_INVERSE_TRANSFORM_H_ #endif // MACE_OPS_WINOGRAD_INVERSE_TRANSFORM_H_
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "mace/ops/winograd_transform.h" #include "mace/ops/winograd_transform.h"
namespace mace { namespace mace {
namespace ops {
void Register_WinogradTransform(OperatorRegistry *op_registry) { void Register_WinogradTransform(OperatorRegistry *op_registry) {
REGISTER_OPERATOR(op_registry, OpKeyBuilder("WinogradTransform") REGISTER_OPERATOR(op_registry, OpKeyBuilder("WinogradTransform")
...@@ -19,4 +20,5 @@ void Register_WinogradTransform(OperatorRegistry *op_registry) { ...@@ -19,4 +20,5 @@ void Register_WinogradTransform(OperatorRegistry *op_registry) {
WinogradTransformOp<DeviceType::OPENCL, half>); WinogradTransformOp<DeviceType::OPENCL, half>);
} }
} // namespace ops
} // namespace mace } // namespace mace
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include "mace/kernels/winograd_transform.h" #include "mace/kernels/winograd_transform.h"
namespace mace { namespace mace {
namespace ops {
template <DeviceType D, typename T> template <DeviceType D, typename T>
class WinogradTransformOp : public Operator<D, T> { class WinogradTransformOp : public Operator<D, T> {
...@@ -37,6 +38,7 @@ class WinogradTransformOp : public Operator<D, T> { ...@@ -37,6 +38,7 @@ class WinogradTransformOp : public Operator<D, T> {
OP_OUTPUT_TAGS(OUTPUT); OP_OUTPUT_TAGS(OUTPUT);
}; };
} // namespace ops
} // namespace mace } // namespace mace
#endif // MACE_OPS_WINOGRAD_TRANSFORM_H_ #endif // MACE_OPS_WINOGRAD_TRANSFORM_H_
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
#include "mace/ops/ops_test_util.h" #include "mace/ops/ops_test_util.h"
namespace mace { namespace mace {
namespace ops {
namespace test {
template <DeviceType D, typename T> template <DeviceType D, typename T>
static void BMWinogradTransform( static void BMWinogradTransform(
int iters, int batch, int height, int width, int channels) { int iters, int batch, int height, int width, int channels) {
...@@ -105,4 +108,6 @@ BM_WINOGRAD_INVERSE_TRANSFORM(1, 14, 14, 32); ...@@ -105,4 +108,6 @@ BM_WINOGRAD_INVERSE_TRANSFORM(1, 14, 14, 32);
BM_WINOGRAD_INVERSE_TRANSFORM(1, 62, 62, 32); BM_WINOGRAD_INVERSE_TRANSFORM(1, 62, 62, 32);
BM_WINOGRAD_INVERSE_TRANSFORM(1, 126, 126, 32); BM_WINOGRAD_INVERSE_TRANSFORM(1, 126, 126, 32);
} // namespace test
} // namespace ops
} // namespace mace } // namespace mace