diff --git a/mace/kernels/opencl/resize_bilinear_opencl.cc b/mace/kernels/opencl/resize_bilinear_opencl.cc
index f0147abdaa7691c091b119d3d318033eecbedd89..6ad4a2d661ca092dfeb8198dda6519aa5c50bb44 100644
--- a/mace/kernels/opencl/resize_bilinear_opencl.cc
+++ b/mace/kernels/opencl/resize_bilinear_opencl.cc
@@ -19,15 +19,9 @@ void ResizeBilinearFunctor::operator()(
 
   index_t out_height;
   index_t out_width;
-  {
-    MACE_CHECK(resize_dims->dim_size() == 1);
-    Tensor::MappingGuard resize_dims_mapper(resize_dims);
-    auto dims_data = resize_dims->data();
-    out_height = dims_data[0];
-    out_width = dims_data[1];
-  }
-
-  std::vector out_shape{batch, channels, out_height, out_width};
+  GetOutputSize(resize_dims, &out_height, &out_width);
+  MACE_CHECK(out_height > 0 && out_width > 0);
+  std::vector out_shape {batch, channels, out_height, out_width};
   output->Resize(out_shape);
 
   float height_scale =
@@ -52,6 +46,7 @@ void ResizeBilinearFunctor::operator()(
       rb_kernel, cl::NullRange,
       cl::NDRange(static_cast(batch * channels),
                   static_cast(out_height), static_cast(out_width)),
+      // TODO (heliangliang) tuning and fix when kwg_size < devisor
       cl::NDRange(1, 16, kwg_size / 16));
   MACE_CHECK(error == CL_SUCCESS, error);
 }
diff --git a/mace/kernels/resize_bilinear.h b/mace/kernels/resize_bilinear.h
index aaed3d9c2f7b8d3a983f2500722283072a0bb25a..59986cf6d8ca4ba5a16887545e7e019f6588dda8 100644
--- a/mace/kernels/resize_bilinear.h
+++ b/mace/kernels/resize_bilinear.h
@@ -1,7 +1,6 @@
 //
 // Copyright (c) 2017 XiaoMi All rights reserved.
 //
-
 #ifndef MACE_KERNELS_RESIZE_BILINEAR_H_
 #define MACE_KERNELS_RESIZE_BILINEAR_H_
 
@@ -101,30 +100,24 @@ void ResizeImage(const T *images,
 }
 
 template
-struct ResizeBilinearFunctor {
-  bool align_corners_;
-
-  ResizeBilinearFunctor(bool align_corners) : align_corners_(align_corners) {}
+class ResizeBilinearFunctor {
+ public:
+  ResizeBilinearFunctor(const std::vector &size, bool align_corners)
+      : align_corners_(align_corners), size_(size) {}
 
   void operator()(const Tensor *input,
                   const Tensor *resize_dims,
                   Tensor *output) {
-    index_t n = input->dim(0);
-    index_t channels = input->dim(1);
-    index_t in_height = input->dim(2);
-    index_t in_width = input->dim(3);
+    const index_t batch = input->dim(0);
+    const index_t channels = input->dim(1);
+    const index_t in_height = input->dim(2);
+    const index_t in_width = input->dim(3);
 
     index_t out_height;
     index_t out_width;
-    {
-      MACE_CHECK(resize_dims->dim_size() == 1);
-      Tensor::MappingGuard resize_dims_mapper(resize_dims);
-      auto dims_data = resize_dims->data();
-      out_height = dims_data[0];
-      out_width = dims_data[1];
-    }
-
-    vector out_shape{n, channels, out_height, out_width};
+    GetOutputSize(resize_dims, &out_height, &out_width);
+    MACE_CHECK(out_height > 0 && out_width > 0);
+    std::vector out_shape{batch, channels, out_height, out_width};
     output->Resize(out_shape);
 
     Tensor::MappingGuard input_mapper(input);
@@ -150,9 +143,29 @@ struct ResizeBilinearFunctor {
     ComputeInterpolationWeights(out_height, in_height, height_scale, ys.data());
     ComputeInterpolationWeights(out_width, in_width, width_scale, xs.data());
 
-    ResizeImage(input_data, n, in_height, in_width, out_height, out_width,
+    ResizeImage(input_data, batch, in_height, in_width, out_height, out_width,
                 channels, xs, ys, output_data);
   }
+
+ protected:
+  void GetOutputSize(const Tensor *resize_dims,
+                     index_t *out_height,
+                     index_t *out_width) {
+    if (size_[0] < 0 || size_[1] < 0) {
+      MACE_CHECK(resize_dims != nullptr && resize_dims->dim_size() == 1);
+      Tensor::MappingGuard resize_dims_mapper(resize_dims);
+      auto dims_data = resize_dims->data();
+      *out_height = dims_data[0];
+      *out_width = dims_data[1];
+    } else {
+      *out_height = size_[0];
+      *out_width = size_[1];
+    }
+  }
+
+ private:
+  bool align_corners_;
+  std::vector size_;
 };
 
 template <>
diff --git a/mace/ops/resize_bilinear.h b/mace/ops/resize_bilinear.h
index e25e8ebc9797513a49335d065b72d397b8aec720..23c5edab8f938cba3cd145bc8c35bd50b7ac9cd1 100644
--- a/mace/ops/resize_bilinear.h
+++ b/mace/ops/resize_bilinear.h
@@ -16,6 +16,7 @@ class ResizeBilinearOp : public Operator {
   ResizeBilinearOp(const OperatorDef &operator_def, Workspace *ws)
       : Operator(operator_def, ws),
         functor_(
+            OperatorBase::GetRepeatedArgument("size", {-1, -1}),
            OperatorBase::GetSingleArgument("align_corners", false)) {}
 
   bool Run() override {
diff --git a/mace/ops/resize_bilinear_benchmark.cc b/mace/ops/resize_bilinear_benchmark.cc
index c36fd7c9140d369f1c8df2ae8f2b4a2abf05f149..8429fd6bee0f8617e98268cd4ce97be43935a44c 100644
--- a/mace/ops/resize_bilinear_benchmark.cc
+++ b/mace/ops/resize_bilinear_benchmark.cc
@@ -23,6 +23,7 @@ static void ResizeBilinearBenchmark(int iters,
       .Input("Input")
       .Input("OutSize")
       .Output("Output")
+      .AddIntsArg("size", {output_height, output_width})
       .Finalize(net.NewOperatorDef());
 
   // Add input data
diff --git a/mace/ops/resize_bilinear_test.cc b/mace/ops/resize_bilinear_test.cc
index 92f7ec4671dcd4bbc73f03ab6c4459bfa3ecd85a..c64a38151658eb1f9a84953c1113e83bcd9f32a5 100644
--- a/mace/ops/resize_bilinear_test.cc
+++ b/mace/ops/resize_bilinear_test.cc
@@ -66,12 +66,12 @@ void TestRandomResizeBilinear() {
   srand(time(nullptr));
   testing::internal::LogToStderr();
   for (int round = 0; round < 10; ++round) {
-    index_t batch = 1 + rand() % 5;
-    index_t channels = 1 + rand() % 100;
-    index_t height = 1 + rand() % 100;
-    index_t width = 1 + rand() % 100;
-    index_t in_height = 1 + rand() % 100;
-    index_t in_width = 1 + rand() % 100;
+    int batch = 1 + rand() % 5;
+    int channels = 1 + rand() % 100;
+    int height = 1 + rand() % 100;
+    int width = 1 + rand() % 100;
+    int in_height = 1 + rand() % 100;
+    int in_width = 1 + rand() % 100;
 
     // Construct graph
     OpsTestNet net;
@@ -80,6 +80,7 @@ void TestRandomResizeBilinear() {
         .Input("OutSize")
         .Output("Output")
         .AddIntArg("align_corners", 1)
+        .AddIntsArg("size", {height, width})
        .Finalize(net.NewOperatorDef());
 
     // Add input data
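
For readers of this patch, here is a minimal standalone sketch (not MACE code) of the fallback rule that the new GetOutputSize() helper implements: if either entry of the static "size" argument is negative (the {-1, -1} default wired into ResizeBilinearOp above), the output dimensions are read from the runtime OutSize tensor; otherwise the static values are used and the tensor is never mapped. The name GetOutputSizeSketch, the plain int64_t array standing in for the tensor data, and the example sizes are all illustrative assumptions.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

using index_t = int64_t;

// Mirrors the fallback introduced by GetOutputSize() in resize_bilinear.h:
// a negative entry in the static "size" argument means "read the output dims
// from the runtime resize_dims values"; otherwise the static size wins.
void GetOutputSizeSketch(const std::vector<index_t> &size,
                         const index_t *resize_dims,
                         index_t *out_height,
                         index_t *out_width) {
  if (size[0] < 0 || size[1] < 0) {
    assert(resize_dims != nullptr);  // dynamic path needs the OutSize data
    *out_height = resize_dims[0];
    *out_width = resize_dims[1];
  } else {
    *out_height = size[0];
    *out_width = size[1];
  }
}

int main() {
  index_t h = 0, w = 0;
  const index_t dims[2] = {480, 640};  // stands in for the OutSize tensor contents

  GetOutputSizeSketch({-1, -1}, dims, &h, &w);    // dynamic path -> 480 x 640
  std::printf("dynamic: %lld x %lld\n", static_cast<long long>(h), static_cast<long long>(w));

  GetOutputSizeSketch({224, 224}, dims, &h, &w);  // static path  -> 224 x 224
  std::printf("static:  %lld x %lld\n", static_cast<long long>(h), static_cast<long long>(w));
  return 0;
}

This is the behavior that lets the benchmark and test above pass "size" as an op argument while still declaring the OutSize input.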