From 3b30078243319f140df2b9326264b0bfae4ca6ab Mon Sep 17 00:00:00 2001
From: Unknown
Date: Wed, 28 Mar 2018 16:28:13 +0800
Subject: [PATCH] Support super resolution & fix d2s OpenCL bugs; add neg/scalar_math ops

---
 mace/core/operator.cc                        |   4 +
 mace/kernels/eltwise.h                       |  23 ++-
 mace/kernels/negative.h                      |  69 ++++++++
 mace/kernels/opencl/cl/depth_to_space.cl     |   3 -
 mace/kernels/opencl/cl/eltwise.cl            |   3 +-
 mace/kernels/opencl/cl/neg.cl                |  14 ++
 mace/kernels/opencl/cl/scalar_math.cl        |  27 ++++
 mace/kernels/opencl/depth_to_space_opencl.cc |  21 +--
 mace/kernels/opencl/eltwise_opencl.cc        |   2 +-
 mace/kernels/opencl/negative_opencl.cc       |  65 ++++++++
 mace/kernels/opencl/scalar_math_opencl.cc    |  57 +++++++
 mace/kernels/opencl/space_to_batch_opencl.cc |   2 +-
 mace/kernels/scalar_math.h                   |  96 +++++++++++
 mace/ops/conv_2d.h                           |   2 +-
 mace/ops/depth_to_space.h                    |   9 +-
 mace/ops/depth_to_space_test.cc              | 104 ++++++++----
 mace/ops/neg.cc                              |  31 ++++
 mace/ops/neg.h                               |  39 +++++
 mace/ops/neg_benchmark.cc                    |  82 ++++++++++
 mace/ops/neg_test.cc                         |  61 +++++++
 mace/ops/scalar_math.cc                      |  31 ++++
 mace/ops/scalar_math.h                       |  49 ++++++
 mace/ops/scalar_math_benchmark.cc            |  88 ++++++++++
 mace/ops/scalar_math_test.cc                 | 160 +++++++++++++++++++
 mace/python/tools/tf_converter_lib.py        |  78 +++++++
 25 files changed, 1060 insertions(+), 60 deletions(-)
 create mode 100644 mace/kernels/negative.h
 create mode 100644 mace/kernels/opencl/cl/neg.cl
 create mode 100644 mace/kernels/opencl/cl/scalar_math.cl
 create mode 100644 mace/kernels/opencl/negative_opencl.cc
 create mode 100644 mace/kernels/opencl/scalar_math_opencl.cc
 create mode 100644 mace/kernels/scalar_math.h
 create mode 100644 mace/ops/neg.cc
 create mode 100644 mace/ops/neg.h
 create mode 100644 mace/ops/neg_benchmark.cc
 create mode 100644 mace/ops/neg_test.cc
 create mode 100644 mace/ops/scalar_math.cc
 create mode 100644 mace/ops/scalar_math.h
 create mode 100644 mace/ops/scalar_math_benchmark.cc
 create mode 100644 mace/ops/scalar_math_test.cc

diff --git a/mace/core/operator.cc b/mace/core/operator.cc
index 38a220eb..d98b999f 100644
--- a/mace/core/operator.cc
+++ b/mace/core/operator.cc
@@ -82,12 +82,14 @@ extern void Register_FusedConv2D(OperatorRegistry *op_registry);
 extern void Register_GlobalAvgPooling(OperatorRegistry *op_registry);
 extern void Register_ImageToBuffer(OperatorRegistry *op_registry);
 extern void Register_MatMul(OperatorRegistry *op_registry);
+extern void Register_Neg(OperatorRegistry *op_registry);
 extern void Register_Pooling(OperatorRegistry *op_registry);
 extern void Register_Proposal(OperatorRegistry *op_registry);
 extern void Register_PSROIAlign(OperatorRegistry *op_registry);
 extern void Register_ReOrganize(OperatorRegistry *op_registry);
 extern void Register_Reshape(OperatorRegistry *op_registry);
 extern void Register_ResizeBilinear(OperatorRegistry *op_registry);
+extern void Register_ScalarMath(OperatorRegistry *op_registry);
 extern void Register_Slice(OperatorRegistry *op_registry);
 extern void Register_Softmax(OperatorRegistry *op_registry);
 extern void Register_SpaceToBatchND(OperatorRegistry *op_registry);
@@ -118,12 +120,14 @@ OperatorRegistry::OperatorRegistry() {
   ops::Register_GlobalAvgPooling(this);
   ops::Register_ImageToBuffer(this);
   ops::Register_MatMul(this);
+  ops::Register_Neg(this);
   ops::Register_Pooling(this);
   ops::Register_Proposal(this);
   ops::Register_PSROIAlign(this);
   ops::Register_ReOrganize(this);
   ops::Register_Reshape(this);
   ops::Register_ResizeBilinear(this);
+  ops::Register_ScalarMath(this);
   ops::Register_Slice(this);
   ops::Register_Softmax(this);
ops::Register_SpaceToBatchND(this); diff --git a/mace/kernels/eltwise.h b/mace/kernels/eltwise.h index 0f9e9b40..a3c63330 100644 --- a/mace/kernels/eltwise.h +++ b/mace/kernels/eltwise.h @@ -19,6 +19,7 @@ enum EltwiseType { SUM = 1, MAX = 2, MIN = 3, + SUB = 4, }; struct EltwiseFunctorBase { @@ -40,7 +41,7 @@ struct EltwiseFunctor : EltwiseFunctorBase { StatsFuture *future) { Tensor::MappingGuard input0_guard(input0); Tensor::MappingGuard input1_guard(input1); - Tensor::MappingGuard output_guard(output); + Tensor::MappingGuard output_guard(output); const T *input0_ptr = input0->data(); const T *input1_ptr = input1->data(); @@ -51,35 +52,41 @@ struct EltwiseFunctor : EltwiseFunctorBase { case PROD: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { - output_ptr[i] = input0_ptr[i] * input1_ptr[i]; + output_ptr[i] = input0_ptr[i] * input1_ptr[i]; } break; case SUM: - if (coeff_.empty()) { + if (coeff_.empty()) { #pragma omp parallel for - for (index_t i = 0; i < size; ++i) { + for (index_t i = 0; i < size; ++i) { output_ptr[i] = input0_ptr[i] + input1_ptr[i]; } - } else { + } else { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output_ptr[i] = - coeff_[0] * input0_ptr[i] + coeff_[1] * input1_ptr[i]; + coeff_[0] * input0_ptr[i] + coeff_[1] * input1_ptr[i]; } } break; - case MAX: + case MAX: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output_ptr[i] = std::max(input0_ptr[i], input1_ptr[i]); } break; - case MIN: + case MIN: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output_ptr[i] = std::min(input0_ptr[i], input1_ptr[i]); } break; + case SUB: +#pragma omp parallel for + for (index_t i = 0; i < size; ++i) { + output_ptr[i] = input0_ptr[i] - input1_ptr[i]; + } + break; default: LOG(FATAL) << "Eltwise op not support type " << type_; } diff --git a/mace/kernels/negative.h b/mace/kernels/negative.h new file mode 100644 index 00000000..544a1854 --- /dev/null +++ b/mace/kernels/negative.h @@ -0,0 +1,69 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#ifndef MACE_KERNELS_NEGATIVE_H_ +#define MACE_KERNELS_NEGATIVE_H_ + +#include + +#include "mace/core/future.h" +#include "mace/core/runtime/opencl/cl2_header.h" +#include "mace/core/tensor.h" +#include "mace/public/mace.h" + +namespace mace { +namespace kernels { + +template +struct NegFunctor { + void operator()(const Tensor *input, + Tensor *output, + StatsFuture *future) { + const index_t batch = input->dim(0); + const index_t height = input->dim(1); + const index_t width = input->dim(2); + const index_t channels = input->dim(3); + + Tensor::MappingGuard input_mapper(input); + Tensor::MappingGuard output_mapper(output); + + const T *input_ptr = input->data(); + T *output_ptr = output->mutable_data(); + +#pragma omp parallel for collapse(4) + for (index_t n = 0; n < batch; ++n) { + for (index_t h = 0; h < height; ++h) { + for (index_t w = 0; w < width; ++w) { + for (index_t c = 0; c < channels; ++c) { + index_t pos = (((n * height) + h) * width + w) * channels + c; + output_ptr[pos] = 0 - input_ptr[pos]; + } + } + } + } + } +}; + +/* +template <> +void NegFunctor::operator()( + const Tensor *input, + const Tensor *bias, + Tensor *output, + StatsFuture *future); +*/ + +template +struct NegFunctor { + void operator()(const Tensor *input, + Tensor *output, + StatsFuture *future); + cl::Kernel kernel_; + std::vector input_shape_; +}; + +} // namespace kernels +} // namespace mace + +#endif // MACE_KERNELS_NEGATIVE_H_ diff --git a/mace/kernels/opencl/cl/depth_to_space.cl b/mace/kernels/opencl/cl/depth_to_space.cl index 824f8266..0ce72428 100644 --- a/mace/kernels/opencl/cl/depth_to_space.cl +++ b/mace/kernels/opencl/cl/depth_to_space.cl @@ -10,19 +10,16 @@ __kernel void depth_to_space(__read_only image2d_t input, const int output_width = get_global_size(1); const int out_pos = mad24(out_d, output_width, out_w); - const int input_width = output_width / block_size; const int in_h = out_h / block_size; const int offset_h = out_h % block_size; const int in_w = out_w / block_size; const int offset_w = out_w % block_size; - const int offset_d = (offset_h * block_size + offset_w) * output_depth; const int in_d = out_d + offset_d; const int in_pos = mad24(in_d, input_width, in_w); - DATA_TYPE4 in_data = READ_IMAGET(input, SAMPLER, (int2)(in_pos, in_h)); WRITE_IMAGET(output, (int2)(out_pos, out_h), in_data); } diff --git a/mace/kernels/opencl/cl/eltwise.cl b/mace/kernels/opencl/cl/eltwise.cl index 735bc96e..dc6ca93d 100644 --- a/mace/kernels/opencl/cl/eltwise.cl +++ b/mace/kernels/opencl/cl/eltwise.cl @@ -27,8 +27,9 @@ __kernel void eltwise(__read_only image2d_t input0, /* [c%4 * w * c/4, h * b] */ out = fmax(in0, in1); #elif ELTWISE_TYPE == 3 out = fmin(in0, in1); +#elif ELTWISE_TYPE == 4 + out = in0 - in1; #endif WRITE_IMAGET(output, (int2)(w, hb), out); } - diff --git a/mace/kernels/opencl/cl/neg.cl b/mace/kernels/opencl/cl/neg.cl new file mode 100644 index 00000000..7b539dda --- /dev/null +++ b/mace/kernels/opencl/cl/neg.cl @@ -0,0 +1,14 @@ +#include +// Supported data types: half/float +__kernel void neg(__read_only image2d_t input, + __write_only image2d_t output) { + const int ch_blk = get_global_id(0); + const int w = get_global_id(1); + const int hb = get_global_id(2); + const int width = get_global_size(1); + + const int pos = mad24(ch_blk, width, w); + DATA_TYPE4 in = READ_IMAGET(input, SAMPLER, (int2)(pos, hb)); + DATA_TYPE4 out = 0 - in; + WRITE_IMAGET(output, (int2)(pos, hb), out); +} diff --git a/mace/kernels/opencl/cl/scalar_math.cl b/mace/kernels/opencl/cl/scalar_math.cl new 
file mode 100644
index 00000000..19678b08
--- /dev/null
+++ b/mace/kernels/opencl/cl/scalar_math.cl
@@ -0,0 +1,27 @@
+#include <common.h>
+
+__kernel void scalar_math(__read_only image2d_t input, /* [c%4 * w * c/4, h * b] */
+                          __private const float scalar,
+                          __write_only image2d_t output) {
+  const int w = get_global_id(0);
+  const int hb = get_global_id(1);
+
+  DATA_TYPE4 in0 = READ_IMAGET(input, SAMPLER, (int2)(w, hb));
+  DATA_TYPE4 in1;
+  in1.x = scalar;
+  in1.y = scalar;
+  in1.z = scalar;
+  in1.w = scalar;
+  DATA_TYPE4 out;
+#if SCALAR_MATH_TYPE == 1
+  out = in0 + in1;
+#elif SCALAR_MATH_TYPE == 4
+  out = in0 - in1;
+#elif SCALAR_MATH_TYPE == 0
+  out = in0 * in1;
+#elif SCALAR_MATH_TYPE == 5
+  out = in0 / in1;
+#endif
+
+  WRITE_IMAGET(output, (int2)(w, hb), out);
+}
diff --git a/mace/kernels/opencl/depth_to_space_opencl.cc b/mace/kernels/opencl/depth_to_space_opencl.cc
index c39c1a34..46e13fd6 100644
--- a/mace/kernels/opencl/depth_to_space_opencl.cc
+++ b/mace/kernels/opencl/depth_to_space_opencl.cc
@@ -22,6 +22,7 @@ void DepthToSpaceOpFunctor<DeviceType::OPENCL, T>::operator()(
 
   int depth_blocks = 1;
   const char *kernel_name = nullptr;
+  index_t kernel_width = input_width;
 
   index_t output_height, output_width, output_depth;
   if (d2s_) {
@@ -30,12 +31,14 @@
     output_depth = input_depth / (block_size_ * block_size_);
     depth_blocks = RoundUpDiv4(output_depth);
     kernel_name = "depth_to_space";
+    kernel_width = output_width;
   } else {
     output_height = input_height / block_size_;
     output_width = input_width / block_size_;
     output_depth = input_depth * block_size_ * block_size_;
     depth_blocks = RoundUpDiv4(input_depth);
     kernel_name = "space_to_depth";
+    kernel_width = input_width;
   }
 
   std::vector<index_t> output_shape = {batch, output_height, output_width,
@@ -53,16 +56,17 @@
     kernel_name_ss << "-D" << kernel_name << "=" << obfuscated_kernel_name;
     built_options.emplace(kernel_name_ss.str());
     auto dt = DataTypeToEnum<T>::value;
-    built_options.emplace("-DDATA_TYPE=" + DtToUpstreamCLDt(dt));
-    built_options.emplace("-DCMD_DATA_TYPE=" + DtToUpstreamCLCMDDt(dt));
+    built_options.emplace("-DDATA_TYPE=" + DtToCLDt(dt));
+    built_options.emplace("-DCMD_DATA_TYPE=" + DtToCLCMDDt(dt));
     kernel_ =
-        runtime->BuildKernel("depth_to_space", kernel_name, built_options);
+        runtime->BuildKernel("depth_to_space",
+                             obfuscated_kernel_name, built_options);
   }
   if (!IsVecEqual(input_shape_, input->shape())) {
     uint32_t idx = 0;
     kernel_.setArg(idx++, *(input->opencl_image()));
-    kernel_.setArg(idx++, block_size_);
-    kernel_.setArg(idx++, depth_blocks);
+    kernel_.setArg(idx++, static_cast<int32_t>(block_size_));
+    kernel_.setArg(idx++, static_cast<int32_t>(depth_blocks));
     kernel_.setArg(idx++, *(output->opencl_image()));
     input_shape_ = input->shape();
   }
@@ -74,8 +78,7 @@
     const std::vector<uint32_t> lws = {8, 16, 8, 1};
     std::stringstream ss;
     ss << "depth_to_space_opencl_kernel_" << output->dim(0) << "_"
-       << output->dim(1) << "_" << output->dim(2) << "_" << output->dim(3);
-
+       << output->dim(1) << "_" << output->dim(2) << "_" << depth_blocks;
     TuningOrRun3DKernel(kernel_, ss.str(), gws, lws, future);
   } else {
     const uint32_t gws[3] = {static_cast<uint32_t>(depth_blocks),
@@ -83,8 +86,8 @@
                              static_cast<uint32_t>(input_width),
                              static_cast<uint32_t>(input_height * batch)};
     const std::vector<uint32_t> lws = {8, 16, 8, 1};
     std::stringstream ss;
-    ss << "space_to_depth_opencl_kernel_" << input->dim(0) << "_"
-       << input->dim(1) << "_" << input->dim(2) << "_" << input->dim(3);
+    ss << "depth_to_space_opencl_kernel_" <<
input->dim(0) << "_" + << input->dim(1) << "_" << input->dim(2) << "_" << depth_blocks; TuningOrRun3DKernel(kernel_, ss.str(), gws, lws, future); } } diff --git a/mace/kernels/opencl/eltwise_opencl.cc b/mace/kernels/opencl/eltwise_opencl.cc index 548d907d..fd3766c7 100644 --- a/mace/kernels/opencl/eltwise_opencl.cc +++ b/mace/kernels/opencl/eltwise_opencl.cc @@ -22,7 +22,7 @@ void EltwiseFunctor::operator()(const Tensor *input0, const index_t channel_blocks = RoundUpDiv4(channels); const index_t width_pixels = channel_blocks * width; - const index_t batch_height_pixels = batch * height; + const index_t batch_height_pixels = batch * height; if (kernel_.get() == nullptr) { auto runtime = OpenCLRuntime::Global(); diff --git a/mace/kernels/opencl/negative_opencl.cc b/mace/kernels/opencl/negative_opencl.cc new file mode 100644 index 00000000..70f866d8 --- /dev/null +++ b/mace/kernels/opencl/negative_opencl.cc @@ -0,0 +1,65 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +#include "mace/kernels/negative.h" +#include "mace/core/runtime/opencl/cl2_header.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" +#include "mace/kernels/opencl/helper.h" +#include "mace/utils/utils.h" + +namespace mace { +namespace kernels { + +template +void NegFunctor::operator()(const Tensor *input, + Tensor *output, + StatsFuture *future) { + const index_t batch = input->dim(0); + const index_t height = input->dim(1); + const index_t width = input->dim(2); + const index_t channels = input->dim(3); + + const index_t channel_blocks = RoundUpDiv4(channels); + + auto runtime = OpenCLRuntime::Global(); + if (kernel_.get() == nullptr) { + std::set built_options; + auto dt = DataTypeToEnum::value; + std::string kernel_name = MACE_OBFUSCATE_SYMBOL("neg"); + built_options.emplace("-Dneg=" + kernel_name); + built_options.emplace("-DDATA_TYPE=" + DtToUpstreamCLDt(dt)); + built_options.emplace("-DCMD_DATA_TYPE=" + DtToUpstreamCLCMDDt(dt)); + kernel_ = runtime->BuildKernel("neg", kernel_name, built_options); + } + if (!IsVecEqual(input_shape_, input->shape())) { + uint32_t idx = 0; + kernel_.setArg(idx++, *(input->opencl_image())); + kernel_.setArg(idx++, *(output->opencl_image())); + input_shape_ = input->shape(); + } + + const uint32_t gws[3] = {static_cast(channel_blocks), + static_cast(width), + static_cast(height * batch)}; + const std::vector lws = {8, 16, 8}; + + cl::Event event; + cl_int error = runtime->command_queue().enqueueNDRangeKernel( + kernel_, cl::NullRange, cl::NDRange(gws[0], gws[1], gws[2]), + cl::NDRange(lws[0], lws[1], lws[2]), nullptr, &event); + MACE_CHECK(error == CL_SUCCESS); + if (future != nullptr) { + future->wait_fn = [runtime, event](CallStats *stats) { + event.wait(); + if (stats != nullptr) { + runtime->GetCallStats(event, stats); + } + }; + } +} + +template struct NegFunctor; +template struct NegFunctor; +} // namespace kernels +} // namespace mace diff --git a/mace/kernels/opencl/scalar_math_opencl.cc b/mace/kernels/opencl/scalar_math_opencl.cc new file mode 100644 index 00000000..42ad7518 --- /dev/null +++ b/mace/kernels/opencl/scalar_math_opencl.cc @@ -0,0 +1,57 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#include "mace/kernels/scalar_math.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" +#include "mace/kernels/opencl/helper.h" +#include "mace/utils/tuner.h" + +namespace mace { +namespace kernels { + +template +void ScalarMathFunctor::operator()(const Tensor *input, + Tensor *output, + StatsFuture *future) { + const index_t batch = input->dim(0); + const index_t height = input->dim(1); + const index_t width = input->dim(2); + const index_t channels = input->dim(3); + + const index_t channel_blocks = RoundUpDiv4(channels); + const index_t width_pixels = channel_blocks * width; + const index_t batch_height_pixels = batch * height; + + if (kernel_.get() == nullptr) { + auto runtime = OpenCLRuntime::Global(); + std::set built_options; + auto dt = DataTypeToEnum::value; + std::string kernel_name = MACE_OBFUSCATE_SYMBOL("scalar_math"); + built_options.emplace("-Dscalar_math=" + kernel_name); + built_options.emplace("-DDATA_TYPE=" + DtToUpstreamCLDt(dt)); + built_options.emplace("-DCMD_DATA_TYPE=" + DtToUpstreamCLCMDDt(dt)); + built_options.emplace(MakeString("-DSCALAR_MATH_TYPE=", type_)); + kernel_ = runtime->BuildKernel("scalar_math", kernel_name, built_options); + } + if (!IsVecEqual(input_shape_, input->shape())) { + uint32_t idx = 0; + kernel_.setArg(idx++, *(input->opencl_image())); + kernel_.setArg(idx++, static_cast(coeff_)); + kernel_.setArg(idx++, *(output->opencl_image())); + input_shape_ = input->shape(); + } + + const uint32_t gws[2] = {static_cast(width_pixels), + static_cast(batch_height_pixels)}; + const std::vector lws = {64, 16, 1}; + std::stringstream ss; + ss << "eltwise_opencl_kernel_" << output->dim(0) << "_" << output->dim(1) + << "_" << output->dim(2) << "_" << output->dim(3); + TuningOrRun2DKernel(kernel_, ss.str(), gws, lws, future); +} + +template struct ScalarMathFunctor; +template struct ScalarMathFunctor; +} // namespace kernels +} // namespace mace diff --git a/mace/kernels/opencl/space_to_batch_opencl.cc b/mace/kernels/opencl/space_to_batch_opencl.cc index fe911fbd..f8bf77da 100644 --- a/mace/kernels/opencl/space_to_batch_opencl.cc +++ b/mace/kernels/opencl/space_to_batch_opencl.cc @@ -42,7 +42,7 @@ void SpaceToBatchFunctor::operator()( built_options.emplace("-DCMD_DATA_TYPE=" + DtToCLCMDDt(DataTypeToEnum::value)); kernel_ = - runtime->BuildKernel("space_to_batch", kernel_name, built_options); + runtime->BuildKernel("space_to_batch", obfuscated_kernel_name, built_options); } if (!IsVecEqual(space_shape_, space_tensor->shape())) { uint32_t idx = 0; diff --git a/mace/kernels/scalar_math.h b/mace/kernels/scalar_math.h new file mode 100644 index 00000000..75c52867 --- /dev/null +++ b/mace/kernels/scalar_math.h @@ -0,0 +1,96 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// +#ifndef MACE_KERNELS_SCALAR_MATH_H_ +#define MACE_KERNELS_SCALAR_MATH_H_ + +#include +#include + +#include "mace/core/future.h" +#include "mace/core/runtime/opencl/cl2_header.h" +#include "mace/core/tensor.h" + +namespace mace { +namespace kernels { + +enum ScalarMathType { + MUL = 0, + ADD = 1, + MAX = 2, + MIN = 3, + SUB = 4, + DIV = 5, +}; + +struct ScalarMathFunctorBase { + ScalarMathFunctorBase(const ScalarMathType type, const float coeff) + : type_(type), coeff_(coeff) {} + + ScalarMathType type_; + float coeff_; +}; + +template +struct ScalarMathFunctor : ScalarMathFunctorBase { + ScalarMathFunctor(const ScalarMathType type, const float coeff) + : ScalarMathFunctorBase(type, coeff) {} + + void operator()(const Tensor *input, + Tensor *output, + StatsFuture *future) { + Tensor::MappingGuard input_guard(input); + Tensor::MappingGuard output_guard(output); + + const T *input_ptr = input->data(); + T *output_ptr = output->mutable_data(); + const index_t size = input->size(); + + switch (type_) { + case MUL: +#pragma omp parallel for + for (index_t i = 0; i < size; ++i) { + output_ptr[i] = coeff_ * input_ptr[i]; + } + break; + case ADD: +#pragma omp parallel for + for (index_t i = 0; i < size; ++i) { + output_ptr[i] = coeff_ + input_ptr[i]; + } + break; + case SUB: +#pragma omp parallel for + for (index_t i = 0; i < size; ++i) { + output_ptr[i] = input_ptr[i] - coeff_; + } + break; + case DIV: +#pragma omp parallel for + for (index_t i = 0; i < size; ++i) { + output_ptr[i] = input_ptr[i] / coeff_; + } + break; + default: + LOG(FATAL) << "ScalarMath op not support type " << type_; + } + } +}; + +template +struct ScalarMathFunctor : ScalarMathFunctorBase { + ScalarMathFunctor(const ScalarMathType type, const float coeff) + : ScalarMathFunctorBase(type, coeff) {} + + void operator()(const Tensor *input, + Tensor *output, + StatsFuture *future); + + cl::Kernel kernel_; + std::vector input_shape_; +}; + +} // namespace kernels +} // namespace mace + +#endif // MACE_KERNELS_SCALAR_MATH_H_ diff --git a/mace/ops/conv_2d.h b/mace/ops/conv_2d.h index 08f1bab2..cf58cc9c 100644 --- a/mace/ops/conv_2d.h +++ b/mace/ops/conv_2d.h @@ -31,7 +31,7 @@ class Conv2dOp : public ConvPool2dOpBase { const Tensor *filter = this->Input(FILTER); const Tensor *bias = this->InputSize() >= 3 ? 
this->Input(BIAS) : nullptr; Tensor *output = this->Output(OUTPUT); - + functor_(input, filter, bias, output, future); return true; diff --git a/mace/ops/depth_to_space.h b/mace/ops/depth_to_space.h index 78ff3919..ad71396f 100644 --- a/mace/ops/depth_to_space.h +++ b/mace/ops/depth_to_space.h @@ -19,18 +19,16 @@ class DepthToSpaceOp : public Operator { public: DepthToSpaceOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetSingleArgument("block_size", 1), true) {} + block_size_(OperatorBase::GetSingleArgument("block_size", 1)), + functor_(this->block_size_, true) {} bool Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); Tensor *output = this->Output(OUTPUT); MACE_CHECK(input->dim_size() == 4, "input dim should be 4"); - const int block_size = - OperatorBase::GetSingleArgument("block_size", 1); - int input_depth = input->dim(3); - MACE_CHECK(input_depth % (block_size * block_size) == 0, + MACE_CHECK(input_depth % (block_size_ * block_size_) == 0, "input depth should be dividable by block_size * block_size", input->dim(3)); MACE_CHECK((input_depth % 4) == 0, @@ -40,6 +38,7 @@ class DepthToSpaceOp : public Operator { } protected: + const int block_size_; OP_INPUT_TAGS(INPUT); OP_OUTPUT_TAGS(OUTPUT); diff --git a/mace/ops/depth_to_space_test.cc b/mace/ops/depth_to_space_test.cc index ba31174d..835e39b3 100644 --- a/mace/ops/depth_to_space_test.cc +++ b/mace/ops/depth_to_space_test.cc @@ -1,7 +1,9 @@ // // Copyright (c) 2017 XiaoMi All rights reserved. // +#include +#include #include "mace/core/operator.h" #include "mace/ops/ops_test_util.h" @@ -48,6 +50,7 @@ void RunDepthToSpace(const bool d2s, ExpectTensorNear(*expected, *net.GetOutput("Output"), 0.001); } + class SpaceToDepthOpTest : public OpsTestBase {}; TEST_F(SpaceToDepthOpTest, Input2x4x4_B2_CPU) { @@ -70,6 +73,8 @@ TEST_F(SpaceToDepthOpTest, Input2x4x4_B2_OPENCL) { 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}); } + + TEST_F(SpaceToDepthOpTest, Input2x2x4_B2_CPU) { RunDepthToSpace(false, {1, 2, 2, 4}, {1, 2, 3, 4, 5, 6, 7, 8, @@ -132,46 +137,83 @@ TEST_F(DepthToSpaceOpTest, Input1x1x16_B2_OPENCL) { 9, 10, 11, 12, 13, 14, 15, 16}); } -/* -TEST_F(DepthToSpaceOpTest, Input2x2x3_B2_CPU) { - RunDepthToSpace({1, 2, 2, 3}, - {1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 11, 12}, - 2, - {1, 1, 1, 12}, - {1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12}); +TEST_F(DepthToSpaceOpTest, InputLarger_B2_OPENCL) { + const std::vector in = std::vector(192 * 192 *128, 1.0); + + RunDepthToSpace(true, {1, 192, 192, 128}, + in, + 2, + {1, 384, 384, 32}, + in); +} + + +template +void RandomTest(const bool d2s, const int block_size, + const std::vector &shape) { + testing::internal::LogToStderr(); + srand(time(NULL)); + + // Construct graph + OpsTestNet net; + + const char *ops_name = (d2s) ? "DepthToSpace" : "SpaceToDepth"; + const char *ops_test_name = (d2s) ? 
"DepthToSpaceTest" : "SpaceToDepthTest"; + + // Add input data + net.AddRandomInput("Input1", shape); + + OpDefBuilder(ops_name, ops_test_name) + .Input("Input1") + .AddIntArg("block_size", block_size) + .Output("Output") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(); + + BufferToImage(&net, "Input1", "InputImg1", + kernels::BufferType::IN_OUT_CHANNEL); + + OpDefBuilder(ops_name, ops_test_name) + .Input("InputImg1") + .AddIntArg("block_size", block_size) + .AddIntArg("T", static_cast(DataTypeToEnum::value)) + .Output("OutputImg") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(D); + + ImageToBuffer(&net, "OutputImg", "OPENCLOutput", + kernels::BufferType::IN_OUT_CHANNEL); + + if (DataTypeToEnum::value == DT_FLOAT) { + ExpectTensorNear(*net.GetTensor("Output"), + *net.GetOutput("OPENCLOutput"), 1e-3); + } else { + ExpectTensorNear(*net.GetTensor("Output"), + *net.GetOutput("OPENCLOutput"), 1e-1); + } } -TEST_F(DepthToSpaceOpTest, Input2x2x3_B2_OPENCL) { - RunDepthToSpace({1, 2, 2, 6}, - {1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 11, 12 - }, - 2, - {1, 1, 1, 12}, - {1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12}); +TEST_F(DepthToSpaceOpTest, OPENCLRandomFloat) { + RandomTest(true, 2, {1, 192, 192, 128}); } -TEST_F(DepthToSpaceOpTest, Input2x2x2_B2_CPU) { +TEST_F(DepthToSpaceOpTest, OPENCLRandomHalf) { +RandomTest(true, 2, {1, 192, 192, 128}); +} - RunDepthToSpace({1, 2, 2, 2}, - {1, 10, 2, 20, 3, 30, 4, 40}, - 2, - {1, 1, 1, 8}, - {1, 10, 2, 20, 3, 30, 4, 40}); +TEST_F(SpaceToDepthOpTest, OPENCLRandomFloat) { +RandomTest(false, 2, {1, 384, 384, 32}); } -TEST_F(DepthToSpaceOpTest, Input2x2x2_B2_OPENCL) { +TEST_F(SpaceToDepthOpTest, OPENCLRandomHalf) { +RandomTest(false, 2, {1, 384, 384, 32}); +} - RunDepthToSpace({1, 2, 2, 2}, - {1, 10, 2, 20, 3, 30, 4, 40}, - 2, - {1, 1, 1, 8}, - {1, 10, 2, 20, 3, 30, 4, 40}); -}*/ } // namespace test } // namespace ops } // namespace mace diff --git a/mace/ops/neg.cc b/mace/ops/neg.cc new file mode 100644 index 00000000..c4dffe70 --- /dev/null +++ b/mace/ops/neg.cc @@ -0,0 +1,31 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +#include "mace/ops/neg.h" + +namespace mace { +namespace ops { + +void Register_Neg(OperatorRegistry *op_registry) { + REGISTER_OPERATOR(op_registry, OpKeyBuilder("Neg") + .Device(DeviceType::CPU) + .TypeConstraint("T") + .Build(), + NegOp); + + REGISTER_OPERATOR(op_registry, OpKeyBuilder("Neg") + .Device(DeviceType::OPENCL) + .TypeConstraint("T") + .Build(), + NegOp); + + REGISTER_OPERATOR(op_registry, OpKeyBuilder("Neg") + .Device(DeviceType::OPENCL) + .TypeConstraint("T") + .Build(), + NegOp); +} + +} // namespace ops +} // namespace mace diff --git a/mace/ops/neg.h b/mace/ops/neg.h new file mode 100644 index 00000000..0e3be04c --- /dev/null +++ b/mace/ops/neg.h @@ -0,0 +1,39 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#ifndef MACE_OPS_NEG_H_ +#define MACE_OPS_NEG_H_ + +#include + +#include "mace/core/operator.h" +#include "mace/kernels/negative.h" + +namespace mace { +namespace ops { + +template +class NegOp : public Operator { + public: + NegOp(const OperatorDef &operator_def, Workspace *ws) + : Operator(operator_def, ws), + functor_() {} + + bool Run(StatsFuture *future) override { + const Tensor *input_tensor = this->Input(0); + Tensor *output_tensor = this->outputs_[0]; + output_tensor->ResizeLike(input_tensor); + + functor_(input_tensor, output_tensor, future); + return true; + } + + private: + kernels::NegFunctor functor_; +}; + +} // namespace ops +} // namespace mace + +#endif // MACE_OPS_NEGATIVE_H_ diff --git a/mace/ops/neg_benchmark.cc b/mace/ops/neg_benchmark.cc new file mode 100644 index 00000000..4d8f6cce --- /dev/null +++ b/mace/ops/neg_benchmark.cc @@ -0,0 +1,82 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +#include "mace/core/operator.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" +#include "mace/core/testing/test_benchmark.h" +#include "mace/ops/ops_test_util.h" + +namespace mace { +namespace ops { +namespace test { + +template +static void Neg(int iters, int batch, int channels, int height, int width) { + mace::testing::StopTiming(); + + OpsTestNet net; + + // Add input data + net.AddRandomInput("Input", {batch, height, width, channels}); + + if (D == DeviceType::OPENCL) { + BufferToImage(&net, "Input", "InputImage", + kernels::BufferType::IN_OUT_CHANNEL); + + OpDefBuilder("Neg", "NegBM") + .Input("InputImage") + .Output("Output") + .Finalize(net.NewOperatorDef()); + } else { + OpDefBuilder("Neg", "NegBM") + .Input("Input") + .Output("Output") + .Finalize(net.NewOperatorDef()); + } + + // Warm-up + for (int i = 0; i < 5; ++i) { + net.RunOp(D); + } + net.Sync(); + + mace::testing::StartTiming(); + while (iters--) { + net.RunOp(D); + } + net.Sync(); +} + +#define BM_NEG_MACRO(N, C, H, W, TYPE, DEVICE) \ + static void BM_NEG_##N##_##C##_##H##_##W##_##TYPE##_##DEVICE( \ + int iters) { \ + const int64_t tot = static_cast(iters) * N * C * H * W; \ + mace::testing::MaccProcessed(tot); \ + mace::testing::BytesProcessed(tot *(sizeof(TYPE))); \ + Neg(iters, N, C, H, W); \ + } \ + BENCHMARK(BM_NEG_##N##_##C##_##H##_##W##_##TYPE##_##DEVICE) + +#define BM_NEG(N, C, H, W) \ + BM_NEG_MACRO(N, C, H, W, float, CPU); \ + BM_NEG_MACRO(N, C, H, W, float, OPENCL); \ + BM_NEG_MACRO(N, C, H, W, half, OPENCL); + +BM_NEG(1, 1, 512, 512); +BM_NEG(1, 3, 128, 128); +BM_NEG(1, 3, 512, 512); +BM_NEG(1, 32, 112, 112); +BM_NEG(1, 64, 256, 256); +BM_NEG(1, 64, 512, 512); +BM_NEG(1, 128, 56, 56); +BM_NEG(1, 128, 256, 256); +BM_NEG(1, 256, 14, 14); +BM_NEG(1, 512, 14, 14); +BM_NEG(1, 1024, 7, 7); +BM_NEG(32, 1, 256, 256); +BM_NEG(32, 3, 256, 256); + +} // namespace test +} // namespace ops +} // namespace mace diff --git a/mace/ops/neg_test.cc b/mace/ops/neg_test.cc new file mode 100644 index 00000000..c7ae15f6 --- /dev/null +++ b/mace/ops/neg_test.cc @@ -0,0 +1,61 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#include "mace/core/operator.h" +#include "mace/ops/ops_test_util.h" + +namespace mace { +namespace ops { +namespace test { + +class NegOpTest : public OpsTestBase {}; + +template +void NegSimple() { + OpsTestNet net; + + // Add input data + net.AddInputFromArray("Input", {1, 6, 2, 1}, + {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}); + + if (D == DeviceType::OPENCL) { + BufferToImage(&net, "Input", "InputImage", + kernels::BufferType::IN_OUT_CHANNEL); + + OpDefBuilder("Neg", "NegTest") + .Input("InputImage") + .Output("OutputImage") + .Finalize(net.NewOperatorDef()); + // Run + net.RunOp(D); + + // Transfer output + ImageToBuffer(&net, "OutputImage", "Output", + kernels::BufferType::IN_OUT_CHANNEL); + } else { + OpDefBuilder("Neg", "NegTest") + .Input("Input") + .Output("Output") + .Finalize(net.NewOperatorDef()); + // Run + net.RunOp(D); + } + + // Check + auto expected = CreateTensor( + {1, 6, 2, 1}, + {-5, -5, -7, -7, -9, -9, -11, -11, -13, -13, -15, -15}); + + ExpectTensorNear(*expected, *net.GetOutput("Output"), 1e-8); +} + +TEST_F(NegOpTest, NegSimpleCPU) { NegSimple(); } + +TEST_F(NegOpTest, NegSimpleOPENCL) { + NegSimple(); +} + +} // namespace test +} // namespace ops +} // namespace mace diff --git a/mace/ops/scalar_math.cc b/mace/ops/scalar_math.cc new file mode 100644 index 00000000..9891994f --- /dev/null +++ b/mace/ops/scalar_math.cc @@ -0,0 +1,31 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +#include "mace/ops/scalar_math.h" + +namespace mace { +namespace ops { + +void Register_ScalarMath(OperatorRegistry *op_registry) { + REGISTER_OPERATOR(op_registry, OpKeyBuilder("ScalarMath") + .Device(DeviceType::CPU) + .TypeConstraint("T") + .Build(), + ScalarMathOp); + + REGISTER_OPERATOR(op_registry, OpKeyBuilder("ScalarMath") + .Device(DeviceType::OPENCL) + .TypeConstraint("T") + .Build(), + ScalarMathOp); + + REGISTER_OPERATOR(op_registry, OpKeyBuilder("ScalarMath") + .Device(DeviceType::OPENCL) + .TypeConstraint("T") + .Build(), + ScalarMathOp); +} + +} // namespace ops +} // namespace mace diff --git a/mace/ops/scalar_math.h b/mace/ops/scalar_math.h new file mode 100644 index 00000000..2f0f4394 --- /dev/null +++ b/mace/ops/scalar_math.h @@ -0,0 +1,49 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +#ifndef MACE_OPS_SCALAR_MATH_H_ +#define MACE_OPS_SCALAR_MATH_H_ + +#include + +#include "mace/core/operator.h" +#include "mace/kernels/scalar_math.h" + +namespace mace { +namespace ops { + +template +class ScalarMathOp : public Operator { + public: + ScalarMathOp(const OperatorDef &operator_def, Workspace *ws) + : Operator(operator_def, ws), + x_(OperatorBase::GetSingleArgument("x", 1.0)), + functor_(static_cast( + OperatorBase::GetSingleArgument( + "type", static_cast( + kernels::ScalarMathType::ADD))), + this->x_) {} + + bool Run(StatsFuture *future) override { + const Tensor *input_tensor = this->Input(INPUT); + Tensor *output_tensor = this->Output(OUTPUT); + output_tensor->ResizeLike(input_tensor); + + functor_(input_tensor, output_tensor, future); + return true; + } + + protected: + const float x_; + OP_INPUT_TAGS(INPUT); + OP_OUTPUT_TAGS(OUTPUT); + + private: + kernels::ScalarMathFunctor functor_; +}; + +} // namespace ops +} // namespace mace + +#endif // MACE_OPS_SCALAR_MATH_H_ diff --git a/mace/ops/scalar_math_benchmark.cc b/mace/ops/scalar_math_benchmark.cc new file mode 100644 index 00000000..90351326 --- /dev/null +++ b/mace/ops/scalar_math_benchmark.cc @@ -0,0 +1,88 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#include "mace/core/operator.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" +#include "mace/core/testing/test_benchmark.h" +#include "mace/ops/ops_test_util.h" + +namespace mace { +namespace ops { +namespace test { + +template +static void ScalarMath(int iters, int batch, int channels, + int height, int width, float x, int type) { + mace::testing::StopTiming(); + + OpsTestNet net; + + // Add input data + net.AddRandomInput("Input", {batch, height, width, channels}); + + if (D == DeviceType::OPENCL) { + BufferToImage(&net, "Input", "InputImage", + kernels::BufferType::IN_OUT_CHANNEL); + OpDefBuilder("ScalarMath", "ScalarMathBM") + .Input("InputImage") + .Output("Output") + .AddIntArg("type", type) + .AddFloatArg("x", x) + .Finalize(net.NewOperatorDef()); + } else { + OpDefBuilder("ScalarMath", "ScalarMathBM") + .Input("Input") + .Output("Output") + .AddIntArg("type", type) + .AddFloatArg("x", x) + .Finalize(net.NewOperatorDef()); + } + + // Warm-up + for (int i = 0; i < 5; ++i) { + net.RunOp(D); + } + net.Sync(); + + mace::testing::StartTiming(); + while (iters--) { + net.RunOp(D); + } + net.Sync(); +} + +#define BM_SCALAR_MATH_MACRO(N, C, H, W, X, G, TYPE, DEVICE) \ + static void \ + BM_SCALAR_MATH_##N##_##C##_##H##_##W##_##X##_##G##_##TYPE##_##DEVICE( \ + int iters) { \ + const int64_t tot = static_cast(iters) * N * C * H * W; \ + mace::testing::MaccProcessed(tot); \ + mace::testing::BytesProcessed(tot *(sizeof(TYPE))); \ + ScalarMath(iters, N, C, H, W, X, G); \ + } \ + BENCHMARK( \ + BM_SCALAR_MATH_##N##_##C##_##H##_##W##_##X##_##G##_##TYPE##_##DEVICE) + +#define BM_SCALAR_MATH(N, C, H, W, X, G) \ + BM_SCALAR_MATH_MACRO(N, C, H, W, X, G, float, CPU); \ + BM_SCALAR_MATH_MACRO(N, C, H, W, X, G, float, OPENCL); \ + BM_SCALAR_MATH_MACRO(N, C, H, W, X, G, half, OPENCL); + +BM_SCALAR_MATH(1, 1, 512, 512, 2, 0); +BM_SCALAR_MATH(1, 3, 128, 128, 2, 1); +BM_SCALAR_MATH(1, 3, 512, 512, 2, 2); +BM_SCALAR_MATH(1, 32, 112, 112, 2, 3); +BM_SCALAR_MATH(1, 64, 256, 256, 3, 0); +BM_SCALAR_MATH(1, 64, 512, 512, 3, 1); +BM_SCALAR_MATH(1, 128, 56, 56, 3, 2); +BM_SCALAR_MATH(1, 128, 256, 256, 3, 3); +BM_SCALAR_MATH(1, 256, 14, 14, 3, 0); +BM_SCALAR_MATH(1, 512, 14, 14, 3, 1); +BM_SCALAR_MATH(1, 1024, 7, 7, 3, 2); +BM_SCALAR_MATH(32, 1, 256, 256, 3, 3); +BM_SCALAR_MATH(32, 3, 256, 256, 3, 2); + +} // namespace test +} // namespace ops +} // namespace mace diff --git a/mace/ops/scalar_math_test.cc b/mace/ops/scalar_math_test.cc new file mode 100644 index 00000000..3da6176d --- /dev/null +++ b/mace/ops/scalar_math_test.cc @@ -0,0 +1,160 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#include "mace/core/operator.h" +#include "mace/ops/ops_test_util.h" +#include "../kernels/scalar_math.h" + +namespace mace { +namespace ops { +namespace test { + +class ScalarMathOpTest : public OpsTestBase {}; + + +template +void Simple(const kernels::ScalarMathType type, + const std::vector &shape, + const std::vector &input0, + const float x, + const std::vector &output) { + // Construct graph + OpsTestNet net; + + // Add input data + net.AddInputFromArray("Input1", shape, input0); + + if (D == DeviceType::CPU) { + OpDefBuilder("ScalarMath", "ScalarMathTest") + .Input("Input1") + .AddIntArg("type", static_cast(type)) + .AddFloatArg("x", x) + .Output("Output") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(D); + } else { + BufferToImage(&net, "Input1", "InputImg1", + kernels::BufferType::IN_OUT_CHANNEL); + OpDefBuilder("ScalarMath", "ScalarMathTest") + .Input("InputImg1") + .AddIntArg("type", static_cast(type)) + .AddFloatArg("x", x) + .Output("OutputImg") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(D); + + ImageToBuffer(&net, "OutputImg", "Output", + kernels::BufferType::IN_OUT_CHANNEL); + } + + auto expected = CreateTensor(shape, output); + + ExpectTensorNear(*expected, *net.GetOutput("Output"), 1e-3); +} + +TEST_F(ScalarMathOpTest, CPUSimple) { + Simple(kernels::ScalarMathType::MUL, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 0.1, {0.1, 0.2, .3, .4, .5, .6}); + + Simple(kernels::ScalarMathType::ADD, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 2.0, {3, 4, 5, 6, 7, 8}); + + Simple(kernels::ScalarMathType::DIV, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 0.1, {10, 20, 30, 40, 50, 60}); + + Simple(kernels::ScalarMathType::SUB, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 2.0, {-1, 0, 1, 2, 3, 4}); +} + +TEST_F(ScalarMathOpTest, GPUSimple) { + Simple(kernels::ScalarMathType::MUL, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 0.1, {0.1, 0.2, .3, .4, .5, .6}); + + Simple(kernels::ScalarMathType::ADD, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 2.0, {3, 4, 5, 6, 7, 8}); + + Simple(kernels::ScalarMathType::DIV, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 0.1, {10, 20, 30, 40, 50, 60}); + + Simple(kernels::ScalarMathType::SUB, {1, 1, 2, 3}, + {1, 2, 3, 4, 5, 6}, 2.0, {-1, 0, 1, 2, 3, 4}); +} + +template +void RandomTest(const kernels::ScalarMathType type, + const std::vector &shape) { + testing::internal::LogToStderr(); + srand(time(NULL)); + + // Construct graph + OpsTestNet net; + + // Add input data + net.AddRandomInput("Input1", shape); + + OpDefBuilder("ScalarMath", "ScalarMathTest") + .Input("Input1") + .AddIntArg("type", static_cast(type)) + .AddFloatArg("x", 1.2) + .Output("Output") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(); + + BufferToImage(&net, "Input1", "InputImg1", + kernels::BufferType::IN_OUT_CHANNEL); + + OpDefBuilder("ScalarMath", "ScalarMathTest") + .Input("InputImg1") + .AddIntArg("type", static_cast(type)) + .AddFloatArg("x", 1.2) + .AddIntArg("T", static_cast(DataTypeToEnum::value)) + .Output("OutputImg") + .Finalize(net.NewOperatorDef()); + + // Run + net.RunOp(D); + + ImageToBuffer(&net, "OutputImg", "OPENCLOutput", + kernels::BufferType::IN_OUT_CHANNEL); + + if (DataTypeToEnum::value == DT_FLOAT) { + ExpectTensorNear(*net.GetTensor("Output"), + *net.GetOutput("OPENCLOutput"), 1e-3); + } else { + ExpectTensorNear(*net.GetTensor("Output"), + *net.GetOutput("OPENCLOutput"), 1e-1); + } +} + +TEST_F(ScalarMathOpTest, OPENCLRandomFloat) { + RandomTest(kernels::ScalarMathType::MUL, + {3, 23, 37, 19}); + RandomTest(kernels::ScalarMathType::ADD, + {13, 32, 32, 64}); + 
+  RandomTest<DeviceType::OPENCL, float>(kernels::ScalarMathType::SUB,
+                                        {3, 32, 32, 64});
+  RandomTest<DeviceType::OPENCL, float>(kernels::ScalarMathType::DIV,
+                                        {13, 32, 32, 64});
+}
+
+TEST_F(ScalarMathOpTest, OPENCLRandomHalf) {
+  RandomTest<DeviceType::OPENCL, half>(kernels::ScalarMathType::MUL,
+                                       {3, 23, 37, 19});
+  RandomTest<DeviceType::OPENCL, half>(kernels::ScalarMathType::ADD,
+                                       {13, 32, 32, 64});
+  RandomTest<DeviceType::OPENCL, half>(kernels::ScalarMathType::SUB,
+                                       {3, 32, 32, 64});
+  RandomTest<DeviceType::OPENCL, half>(kernels::ScalarMathType::DIV,
+                                       {13, 32, 32, 64});
+}
+
+}  // namespace test
+}  // namespace ops
+}  // namespace mace
diff --git a/mace/python/tools/tf_converter_lib.py b/mace/python/tools/tf_converter_lib.py
index 01e73645..01a4a540 100644
--- a/mace/python/tools/tf_converter_lib.py
+++ b/mace/python/tools/tf_converter_lib.py
@@ -19,6 +19,16 @@ pooling_type_mode = {
   'MaxPool': 2
 }
 
+# The order must match the EltwiseType enum order.
+math_type_mode = {
+  'MUL': 0,
+  'ADD': 1,
+  'MAX': 2,
+  'MIN': 3,
+  'SUB': 4,
+  'DIV': 5
+}
+
 buffer_type_map = {
   'CONV2D_FILTER' : 0,
   'IN_OUT_CHANNEL' : 1,
@@ -623,6 +633,64 @@ class TFConverter(object):
     self.resolved_ops[op.name] = 1
     self.unused_tensor.add(get_input_tensor(op, 1).name)
 
+  def convert_neg(self, op):
+    op_def = self.net_def.op.add()
+    arg = op_def.arg.add()
+    arg.name = 'T'
+    arg.i = self.dt
+    op_def.name = op.name
+    op_def.type = "Neg"
+    op_def.input.extend([input.name for input in op.inputs])
+    op_def.output.extend([output.name for output in op.outputs])
+    self.add_output_shape(op.outputs, op_def)
+    self.resolved_ops[op.name] = 1
+
+  def convert_math(self, op, math_type):
+    op_def = self.net_def.op.add()
+    arg = op_def.arg.add()
+    arg.name = 'T'
+    arg.i = self.dt
+    op_def.name = op.name
+    input_tensor0 = get_input_tensor(op, 0)
+    input_tensor1 = get_input_tensor(op, 1)
+
+    if input_tensor0.shape == input_tensor1.shape:
+      op_def.type = "Eltwise"
+      op_def.input.extend([input.name for input in op.inputs])
+    else:
+      op_def.type = "ScalarMath"
+      x_value = 0
+      if len(input_tensor1.shape) == 4:
+        op_def.input.extend([op.inputs[1].name])
+        x_value = get_input_tensor(op, 0).eval().astype(np.float32)
+      else:
+        op_def.input.extend([op.inputs[0].name])
+        x_value = get_input_tensor(op, 1).eval().astype(np.float32)
+      x_arg = op_def.arg.add()
+      x_arg.name = 'x'
+      x_arg.f = x_value
+    type_arg = op_def.arg.add()
+    type_arg.name = 'type'
+    type_arg.i = math_type_mode[math_type]
+    op_def.output.extend([output.name for output in op.outputs])
+    self.add_output_shape(op.outputs, op_def)
+    self.resolved_ops[op.name] = 1
+
+  def convert_depth_to_space(self, op, d2s):
+    op_def = self.net_def.op.add()
+    arg = op_def.arg.add()
+    arg.name = 'T'
+    arg.i = self.dt
+    op_def.name = op.name
+    op_def.type = op.type
+    op_def.input.extend([op.inputs[0].name])
+    op_def.output.extend([output.name for output in op.outputs])
+    size_arg = op_def.arg.add()
+    size_arg.name = 'block_size'
+    size_arg.i = op.get_attr('block_size')
+    self.add_output_shape(op.outputs, op_def)
+    self.resolved_ops[op.name] = 1
+
   def convert_bias_add(self, op):
     op_def = mace_pb2.OperatorDef()
     arg = op_def.arg.add()
@@ -850,6 +918,16 @@ class TFConverter(object):
         self.convert_space_to_batch(op, False)
       elif op.type == 'BatchToSpaceND':
         self.convert_space_to_batch(op, True)
+      elif op.type == 'DepthToSpace':
+        self.convert_depth_to_space(op, True)
+      elif op.type == 'SpaceToDepth':
+        self.convert_depth_to_space(op, False)
+      elif op.type == 'Neg':
+        self.convert_neg(op)
+      elif op.type == 'Mul':
+        self.convert_math(op, 'MUL')
+      elif op.type == 'Sub':
+        self.convert_math(op, 'SUB')
       elif self.is_softmax(op):
         self.convert_softmax(op)
       elif op.type in ['Relu', 'Sigmoid', 'Tanh']:
--
GitLab
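For reference, the tensor-versus-scalar semantics that the new ScalarMathFunctor (mace/kernels/scalar_math.h) and scalar_math.cl kernel implement can be sketched as a few lines of standalone C++. This is an illustration only, not code from the patch: the free function ScalarMathRef and the main() driver below are invented names, and MAX/MIN are simply passed through here, whereas the CPU functor in the patch rejects them as unsupported.

#include <cstdio>
#include <vector>

// Type codes follow the patch's ScalarMathType enum.
enum ScalarMathType { MUL = 0, ADD = 1, MAX = 2, MIN = 3, SUB = 4, DIV = 5 };

// Reference implementation: apply one scalar to every element of a flat tensor.
std::vector<float> ScalarMathRef(const std::vector<float> &input,
                                 ScalarMathType type, float x) {
  std::vector<float> output(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    switch (type) {
      case MUL: output[i] = x * input[i]; break;
      case ADD: output[i] = x + input[i]; break;
      case SUB: output[i] = input[i] - x; break;  // tensor minus scalar
      case DIV: output[i] = input[i] / x; break;  // tensor divided by scalar
      default:  output[i] = input[i]; break;      // MAX/MIN not handled in this sketch
    }
  }
  return output;
}

int main() {
  // Mirrors the SUB case in ScalarMathOpTest::CPUSimple: expect {-1, 0, 1, 2, 3, 4}.
  std::vector<float> out = ScalarMathRef({1, 2, 3, 4, 5, 6}, SUB, 2.0f);
  for (float v : out) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}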