Commit 2b7d72d8 authored by Liangliang He

Merge branch 'resize' into 'master'

Add OpenCL ResizeBilinear empty kernel

See merge request !92
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/kernels/resize_bilinear.h"
#include "mace/core/tensor.h"
namespace mace {
namespace kernels {
// OpenCL specialization of ResizeBilinearFunctor.
// NOTE(review): intentionally an empty stub — this commit only adds the
// OpenCL entry point ("empty kernel" per the merge request title); the
// actual GPU implementation is expected to land in a follow-up. Calling
// this today is a no-op: `output` is neither resized nor written.
template <>
void ResizeBilinearFunctor<DeviceType::OPENCL, float>::operator()(
const Tensor *input, const Tensor *resize_dims, Tensor *output) {}
} // namespace kernels
} // namespace mace
......@@ -106,16 +106,33 @@ struct ResizeBilinearFunctor {
ResizeBilinearFunctor(bool align_corners) : align_corners_(align_corners) {}
void operator()(const T *input,
T *output,
index_t n,
index_t channels,
index_t in_height,
index_t in_width,
index_t out_height,
index_t out_width) {
void operator()(const Tensor *input,
const Tensor *resize_dims,
Tensor *output) {
index_t n = input->dim(0);
index_t channels = input->dim(1);
index_t in_height = input->dim(2);
index_t in_width = input->dim(3);
index_t out_height;
index_t out_width;
{
MACE_CHECK(resize_dims->dim_size() == 1);
Tensor::MappingGuard resize_dims_mapper(resize_dims);
auto dims_data = resize_dims->data<index_t>();
out_height = dims_data[0];
out_width = dims_data[1];
}
vector<index_t> out_shape{n, channels, out_height, out_width};
output->Resize(out_shape);
const T *input_data = input->data<T>();
T *output_data = output->mutable_data<T>();
if (out_height == in_height && out_width == in_width) {
std::copy(input, input + channels * in_height * in_width, output);
std::copy(input_data, input_data + channels * in_height * in_width,
output_data);
return;
}
......@@ -131,12 +148,16 @@ struct ResizeBilinearFunctor {
ComputeInterpolationWeights(out_height, in_height, height_scale, ys.data());
ComputeInterpolationWeights(out_width, in_width, width_scale, xs.data());
ResizeImage(input, n, in_height, in_width, out_height, out_width, channels,
xs, ys, output);
ResizeImage(input_data, n, in_height, in_width, out_height, out_width,
channels, xs, ys, output_data);
}
};
} // namespace kernels
} // namespace mace
template <>
void ResizeBilinearFunctor<DeviceType::OPENCL, float>::operator()(
const Tensor *input, const Tensor *resize_dims, Tensor *output);
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_RESIZE_BILINEAR_H_
......@@ -13,4 +13,7 @@ REGISTER_NEON_OPERATOR(ResizeBilinear,
ResizeBilinearOp<DeviceType::NEON, float>);
#endif // __ARM_NEON
// Register the OpenCL (GPU) variant of ResizeBilinear with the op registry,
// alongside the existing CPU and NEON registrations above.
REGISTER_OPENCL_OPERATOR(ResizeBilinear,
ResizeBilinearOp<DeviceType::OPENCL, float>);
} // namespace mace
......@@ -21,28 +21,15 @@ class ResizeBilinearOp : public Operator<D, T> {
bool Run() override {
const Tensor *input = this->Input(0);
const Tensor *resize_dims = this->Input(1);
Tensor *output = this->Output(0);
MACE_CHECK(input->dim_size() == 4, "input must be 4-dimensional.",
input->dim_size());
MACE_CHECK(resize_dims->dim_size() == 1,
"resize dim must be 2-dimensional.", resize_dims->dim_size());
Tensor *output = this->Output(0);
index_t n = input->dim(0);
index_t channels = input->dim(1);
index_t in_height = input->dim(2);
index_t in_width = input->dim(3);
index_t out_height = resize_dims->data<index_t>()[0];
index_t out_width = resize_dims->data<index_t>()[1];
vector<index_t> out_shape{n, channels, out_height, out_width};
output->Resize(out_shape);
const T *input_ptr = input->data<T>();
T *output_ptr = output->mutable_data<T>();
functor_(input_ptr, output_ptr, n, channels, in_height, in_width,
out_height, out_width);
functor_(input, resize_dims, output);
return true;
}
......@@ -50,6 +37,6 @@ class ResizeBilinearOp : public Operator<D, T> {
kernels::ResizeBilinearFunctor<D, T> functor_;
};
} // namespace mace
} // namespace mace
#endif // MACE_RESIZE_BILINEAR_H
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include <string>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h"
namespace mace {
// Benchmarks the ResizeBilinear op on device D for one input/output shape.
// Timing covers only the measured RunOp loop; graph setup and warm-up runs
// are excluded via StopTiming()/StartTiming().
template <DeviceType D, typename T>
static void ResizeBilinearBenchmark(int iters,
                                    int batch,
                                    int channels,
                                    int input_height,
                                    int input_width,
                                    int output_height,
                                    int output_width) {
  mace::testing::StopTiming();

  OpsTestNet net;

  // Random input feature map plus the requested output spatial size.
  net.AddRandomInput<DeviceType::CPU, float>(
      "Input", {batch, channels, input_height, input_width});
  net.AddInputFromArray<DeviceType::CPU, index_t>(
      "OutSize", {2}, {output_height, output_width});

  OpDefBuilder("ResizeBilinear", "ResizeBilinearBenchmark")
      .Input("Input")
      .Input("OutSize")
      .Output("Output")
      .Finalize(net.NewOperatorDef());

  // A few untimed runs to amortize one-time initialization cost.
  for (int warm = 0; warm < 5; ++warm) {
    net.RunOp(D);
  }

  mace::testing::StartTiming();
  for (; iters > 0; --iters) {
    net.RunOp(D);
  }
}
// Defines and registers one benchmark instance for a given
// (shape, type, device) combination. Items/bytes processed are accounted
// using the OUTPUT size (N * C * H1 * W1).
#define BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, DEVICE) \
static void \
BM_RESIZE_BILINEAR_##N##_##C##_##H0##_##W0##_##H1##_##W1##_##TYPE##_##DEVICE( \
int iters) { \
const int64_t tot = static_cast<int64_t>(iters) * N * C * H1 * W1; \
mace::testing::ItemsProcessed(tot); \
mace::testing::BytesProcessed(tot *(sizeof(TYPE))); \
ResizeBilinearBenchmark<DEVICE, TYPE>(iters, N, C, H0, W0, H1, W1); \
} \
BENCHMARK( \
BM_RESIZE_BILINEAR_##N##_##C##_##H0##_##W0##_##H1##_##W1##_##TYPE##_##DEVICE)
// Expands to the CPU, NEON and OpenCL variants of the same benchmark.
#define BM_RESIZE_BILINEAR(N, C, H0, W0, H1, W1, TYPE) \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, CPU); \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, NEON); \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, OPENCL);
// Shapes: 2x upsampling at several feature-map sizes, plus one large
// downsample and one large upsample (camera-resolution 4032x3016 cases).
BM_RESIZE_BILINEAR(1, 256, 7, 7, 15, 15, float);
BM_RESIZE_BILINEAR(1, 256, 15, 15, 30, 30, float);
BM_RESIZE_BILINEAR(1, 128, 30, 30, 60, 60, float);
BM_RESIZE_BILINEAR(1, 128, 240, 240, 480, 480, float);
BM_RESIZE_BILINEAR(1, 3, 4032, 3016, 480, 480, float);
BM_RESIZE_BILINEAR(1, 3, 480, 480, 4032, 3016, float);
} // namespace mace
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册