From 216d25ac063c5230492f0e4ac7cba0c4a2dfda5f Mon Sep 17 00:00:00 2001
From: ykkk2333 <77383312+ykkk2333@users.noreply.github.com>
Date: Wed, 17 Aug 2022 10:29:35 +0800
Subject: [PATCH] add instance norm op for xpu (#45097)

* xpu unittest grad compute supports more types, *test=kunlun

* add instance norm xpu, *test=kunlun
---
 .../fluid/operators/instance_norm_op_xpu.cc   |  99 ++++++++++
 .../fluid/platform/device/xpu/xpu2_op_list.h  |   4 +
 .../kernels/xpu/instance_norm_grad_kernel.cc  |  73 +++++++++
 .../phi/kernels/xpu/instance_norm_kernel.cc   |  64 ++++++++
 .../xpu/test_instance_norm_op_xpu.py          | 142 ++++++++++++++++++
 5 files changed, 382 insertions(+)
 create mode 100644 paddle/fluid/operators/instance_norm_op_xpu.cc
 create mode 100644 paddle/phi/kernels/xpu/instance_norm_grad_kernel.cc
 create mode 100644 paddle/phi/kernels/xpu/instance_norm_kernel.cc
 create mode 100644 python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py

diff --git a/paddle/fluid/operators/instance_norm_op_xpu.cc b/paddle/fluid/operators/instance_norm_op_xpu.cc
new file mode 100644
index 00000000000..429c5c47d68
--- /dev/null
+++ b/paddle/fluid/operators/instance_norm_op_xpu.cc
@@ -0,0 +1,99 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_XPU
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/platform/device/device_wrapper.h"
+#include "paddle/fluid/platform/device/xpu/xpu_header.h"
+#include "paddle/phi/kernels/instance_norm_grad_kernel.h"
+#include "paddle/phi/kernels/instance_norm_kernel.h"
+
+namespace paddle {
+namespace operators {
+using Tensor = framework::Tensor;
+
+template <typename DeviceContext, typename T>
+class InstanceNormXPUKernel : public framework::OpKernel<T> {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    const auto epsilon = ctx.Attr<float>("epsilon");
+    const auto* x = ctx.Input<Tensor>("X");
+    const auto* scale = ctx.Input<Tensor>("Scale");
+    const auto* bias = ctx.Input<Tensor>("Bias");
+    auto* y = ctx.Output<Tensor>("Y");
+    auto* mean = ctx.Output<Tensor>("SavedMean");
+    auto* variance = ctx.Output<Tensor>("SavedVariance");
+    auto& dev_ctx = ctx.template device_context<DeviceContext>();
+
+    // call phi kernel
+    phi::InstanceNormKernel<T>(
+        static_cast<const typename framework::ConvertToPhiContext<
+            DeviceContext>::TYPE&>(dev_ctx),
+        *x,
+        *scale,
+        *bias,
+        epsilon,
+        y,
+        mean,
+        variance);
+  }
+};
+template <typename DeviceContext, typename T>
+class InstanceNormGradXPUKernel : public framework::OpKernel<T> {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    const auto epsilon = ctx.Attr<float>("epsilon");
+    const auto* x = ctx.Input<Tensor>("X");
+    const auto* mean = ctx.Input<Tensor>("SavedMean");
+    const auto* variance = ctx.Input<Tensor>("SavedVariance");
+    const auto* scale = ctx.Input<Tensor>("Scale");
+    const auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* dscale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
+    auto* dbias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
+    auto& dev_ctx = ctx.template device_context<DeviceContext>();
+
+    // call phi kernel; argument order follows the phi signature:
+    // (x, scale, saved_mean, saved_variance, y_grad, epsilon,
+    //  x_grad, scale_grad, bias_grad)
+    phi::InstanceNormGradKernel<T>(
+        static_cast<const typename framework::ConvertToPhiContext<
+            DeviceContext>::TYPE&>(dev_ctx),
+        *x,
+        *scale,
+        *mean,
+        *variance,
+        *dy,
+        epsilon,
+        dx,
+        dscale,
+        dbias);
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_XPU_KERNEL(
+    instance_norm,
+    ops::InstanceNormXPUKernel<paddle::platform::XPUDeviceContext, float>);
+REGISTER_OP_XPU_KERNEL(
+    instance_norm_grad,
+    ops::InstanceNormGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
+
+#endif  // PADDLE_WITH_XPU
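The two wrappers above only cast the fluid device context and forward to the phi kernels, so the new op is reachable from the ordinary Python API. Below is a minimal smoke test, a hedged sketch that is not part of the patch, assuming a PaddlePaddle build with XPU support and a Kunlun card visible as device 0:

    # Hedged sketch: exercises the new XPU instance_norm kernel end to end
    # through the static-graph API.
    import numpy as np
    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        x = paddle.static.data(name='x', shape=[2, 3, 4, 5], dtype='float32')
        y = paddle.static.nn.instance_norm(x, epsilon=1e-5)

    exe = paddle.static.Executor(paddle.XPUPlace(0))
    exe.run(startup)
    x_np = np.random.rand(2, 3, 4, 5).astype('float32')
    out, = exe.run(main, feed={'x': x_np}, fetch_list=[y])
    print(out.shape)  # (2, 3, 4, 5)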
diff --git a/paddle/fluid/platform/device/xpu/xpu2_op_list.h b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
index eb7889a11b5..7fedc9c7607 100644
--- a/paddle/fluid/platform/device/xpu/xpu2_op_list.h
+++ b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
@@ -289,6 +289,10 @@ XPUOpMap& get_kl2_ops() {
       {"huber_loss",
        XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
       {"iou_similarity",
        XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+      {"instance_norm",
+       XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+      {"instance_norm_grad",
+       XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
       {"label_smooth",
        XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
       {"lars_momentum",
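With only vartype::FP32 listed, instance_norm and instance_norm_grad are FP32-only on KL2, and the XPU test harness discovers this at runtime. A hedged sketch of that lookup, assuming it runs from the unittests/xpu directory like the test file at the end of this patch:

    # Hedged sketch: query the dtypes registered for the new op, the same way
    # the unit test below does via the op list entries above.
    import sys
    sys.path.append("..")
    from xpu.get_test_cover_info import get_xpu_op_support_types

    print(get_xpu_op_support_types('instance_norm'))  # expect a single FP32 entry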
diff --git a/paddle/phi/kernels/xpu/instance_norm_grad_kernel.cc b/paddle/phi/kernels/xpu/instance_norm_grad_kernel.cc
new file mode 100644
index 00000000000..adf2cd78787
--- /dev/null
+++ b/paddle/phi/kernels/xpu/instance_norm_grad_kernel.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/instance_norm_grad_kernel.h"
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void InstanceNormGradKernel(const Context& dev_ctx,
+                            const DenseTensor& x,
+                            const paddle::optional<DenseTensor>& scale,
+                            const DenseTensor& saved_mean,
+                            const DenseTensor& saved_variance,
+                            const DenseTensor& y_grad,
+                            float epsilon,
+                            DenseTensor* x_grad,
+                            DenseTensor* scale_grad,
+                            DenseTensor* bias_grad) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+  const auto& x_dims = x.dims();
+  int n = x_dims[0];
+  int c = x_dims[1];
+  int h = x_dims[2];
+  int w = x_dims[3];
+
+  dev_ctx.template Alloc<T>(x_grad);
+  if (bias_grad != nullptr) {
+    dev_ctx.template Alloc<float>(bias_grad);
+  }
+  if (scale_grad != nullptr) {
+    dev_ctx.template Alloc<float>(scale_grad);
+  }
+
+  const auto scale_ptr = scale.get_ptr();
+
+  int r = xpu::instance_norm_grad(
+      dev_ctx.x_context(),
+      reinterpret_cast<const XPUType*>(x.data<T>()),
+      reinterpret_cast<const XPUType*>(y_grad.data<T>()),
+      reinterpret_cast<XPUType*>(x_grad->data<T>()),
+      scale_ptr->data<float>(),
+      saved_mean.data<float>(),
+      saved_variance.data<float>(),
+      scale_grad->data<float>(),
+      bias_grad->data<float>(),
+      n,
+      c,
+      h,
+      w,
+      epsilon,
+      true);
+
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "instance_norm_grad");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    instance_norm_grad, XPU, ALL_LAYOUT, phi::InstanceNormGradKernel, float) {}
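xpu::instance_norm_grad computes the whole backward pass on the device. For reference, here is a hedged NumPy sketch of the textbook instance-norm gradients it is expected to produce (not the device library's actual code). Shapes follow this kernel: x and y_grad are (N, C, H, W), scale is (C,), and the saved statistics have one entry per (sample, channel) pair, with the saved "variance" being the inverse standard deviation that the forward kernel below records:

    # Hedged NumPy sketch of the standard instance-norm backward pass.
    # mean and inv_std have shape (N, C), inv_std = 1/sqrt(var + eps).
    import numpy as np

    def instance_norm_grad_ref(x, dy, scale, mean, inv_std):
        n, c, h, w = x.shape
        m = h * w
        istd = inv_std.reshape(n, c, 1, 1)
        x_hat = (x - mean.reshape(n, c, 1, 1)) * istd
        dscale = (dy * x_hat).sum(axis=(0, 2, 3))  # (C,)
        dbias = dy.sum(axis=(0, 2, 3))             # (C,)
        s = scale.reshape(1, c, 1, 1)
        # Standard reduction: dx = s*istd*(dy - mean(dy) - x_hat*mean(dy*x_hat))
        dx = (s * istd / m) * (
            m * dy
            - dy.sum(axis=(2, 3), keepdims=True)
            - x_hat * (dy * x_hat).sum(axis=(2, 3), keepdims=True))
        return dx, dscale, dbias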
diff --git a/paddle/phi/kernels/xpu/instance_norm_kernel.cc b/paddle/phi/kernels/xpu/instance_norm_kernel.cc
new file mode 100644
index 00000000000..293397f66ee
--- /dev/null
+++ b/paddle/phi/kernels/xpu/instance_norm_kernel.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/instance_norm_kernel.h"
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void InstanceNormKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const paddle::optional<DenseTensor>& scale,
+                        const paddle::optional<DenseTensor>& bias,
+                        float epsilon,
+                        DenseTensor* y,
+                        DenseTensor* saved_mean,
+                        DenseTensor* saved_var) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+  const auto& x_dims = x.dims();
+  int n = x_dims[0];
+  int c = x_dims[1];
+  int h = x_dims[2];
+  int w = x_dims[3];
+  dev_ctx.template Alloc<T>(y);
+  dev_ctx.template Alloc<float>(saved_mean);
+  dev_ctx.template Alloc<float>(saved_var);
+
+  const auto scale_ptr = scale.get_ptr();
+  const auto bias_ptr = bias.get_ptr();
+
+  int r = xpu::instance_norm(dev_ctx.x_context(),
+                             reinterpret_cast<const XPUType*>(x.data<T>()),
+                             reinterpret_cast<XPUType*>(y->data<T>()),
+                             n,
+                             c,
+                             h,
+                             w,
+                             epsilon,
+                             scale_ptr->data<float>(),
+                             bias_ptr->data<float>(),
+                             saved_mean->data<float>(),
+                             saved_var->data<float>(),
+                             true);
+
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "instance_norm");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    instance_norm, XPU, ALL_LAYOUT, phi::InstanceNormKernel, float) {}
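Each (sample, channel) pair is normalized over its H x W plane, and the saved statistics are flattened to N*C entries. Note that SavedVariance holds the reciprocal standard deviation 1/sqrt(var + epsilon), as ref_saved_variance in the unit test below makes explicit. A hedged NumPy sketch of this forward computation:

    # Hedged NumPy sketch of the forward pass delegated to xpu::instance_norm.
    # Returns y plus the flattened saved statistics, with the saved "variance"
    # being the inverse std, matching the test's reference outputs.
    import numpy as np

    def instance_norm_ref(x, scale, bias, epsilon=1e-5):
        n, c, h, w = x.shape
        mean = x.mean(axis=(2, 3), keepdims=True)
        var = x.var(axis=(2, 3), keepdims=True)
        inv_std = 1.0 / np.sqrt(var + epsilon)
        y = (scale.reshape(1, c, 1, 1) * (x - mean) * inv_std
             + bias.reshape(1, c, 1, 1))
        return y, mean.reshape(n * c), inv_std.reshape(n * c)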
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py
new file mode 100644
index 00000000000..bdac4e461dc
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numpy as np
+import sys
+import unittest
+from functools import reduce
+
+sys.path.append("..")
+from op_test import OpTest
+from op_test_xpu import XPUOpTest
+from operator import mul
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+
+def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var):
+    x_shape = x.shape
+    if len(x_shape) == 2:
+        x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1))
+    n, c, h, w = x.shape
+
+    mean_tile = np.reshape(mean, (n, c, 1, 1))
+    mean_tile = np.tile(mean_tile, (1, 1, h, w))
+    var_tile = np.reshape(var, (n, c, 1, 1))
+    var_tile = np.tile(var_tile, (1, 1, h, w))
+
+    x_norm = (x - mean_tile) / np.sqrt(var_tile + epsilon).astype('float32')
+    scale_tile = np.reshape(scale, (1, c, 1, 1))
+    scale_tile = np.tile(scale_tile, (n, 1, h, w))
+    bias_tile = np.reshape(bias, (1, c, 1, 1))
+    bias_tile = np.tile(bias_tile, (n, 1, h, w))
+    y = scale_tile * x_norm + bias_tile
+    if len(x_shape) == 2:
+        y = np.reshape(y, x_shape)
+    return y, mean, var
+
+
+def _cal_mean_variance(x, epsilon, mean_shape):
+    mean = np.reshape(np.mean(x, axis=(2, 3)), mean_shape)
+    var = np.reshape(np.var(x, axis=(2, 3)), mean_shape)
+    return mean, var
+
+
+class XPUTestInstanceNormOp(XPUOpTestWrapper):
+
+    def __init__(self):
+        self.op_name = 'instance_norm'
+        self.use_dynamic_create_class = False
+
+    class XPUTestInstanceNormOp(XPUOpTest):
+
+        def setUp(self):
+            self.op_type = "instance_norm"
+            self.dtype = self.in_type
+            self.shape = [2, 3, 4, 5]
+            self.epsilon = 1e-05
+            self.set_attrs()
+
+            np.random.seed(12345)
+            epsilon = self.epsilon
+            shape = self.shape
+            n, c, h, w = shape[0], shape[1], shape[2], shape[3]
+            scale_shape = [c]
+            mean_shape = [n * c]
+
+            x_np = np.random.random_sample(shape).astype(self.dtype)
+            scale_np = np.random.random_sample(scale_shape).astype(np.float32)
+            bias_np = np.random.random_sample(scale_shape).astype(np.float32)
+            mean, variance = self.set_global_mean_var(mean_shape, x_np)
+
+            ref_y_np, ref_saved_mean, variance_tmp = _reference_instance_norm_naive(
+                x_np, scale_np, bias_np, epsilon, mean, variance)
+
+            ref_saved_variance = 1 / np.sqrt(variance_tmp + epsilon)
+
+            self.inputs = {'X': x_np, 'Scale': scale_np, 'Bias': bias_np}
+            self.outputs = {
+                'Y': ref_y_np,
+                'SavedMean': ref_saved_mean,
+                'SavedVariance': ref_saved_variance
+            }
+            self.attrs = {'epsilon': epsilon, 'use_xpu': True}
+
+        def set_global_mean_var(self, mean_shape, x):
+            mean, variance = _cal_mean_variance(x, self.epsilon, mean_shape)
+            return mean, variance
+
+        def set_attrs(self):
+            pass
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0))
+
+        def test_check_grad(self):
+            self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Y')
+
+    class TestXPUInstanceNormOp1(XPUTestInstanceNormOp):
+
+        def set_attrs(self):
+            self.shape = [10, 12, 32, 32]
+
+    class TestXPUInstanceNormOp2(XPUTestInstanceNormOp):
+
+        def set_attrs(self):
+            self.shape = [4, 5, 6, 7]
+
+    class TestXPUInstanceNormOp3(XPUTestInstanceNormOp):
+
+        def set_attrs(self):
+            self.shape = [1, 8, 16, 16]
+
+    class TestXPUInstanceNormOp4(XPUTestInstanceNormOp):
+
+        def set_attrs(self):
+            self.shape = [4, 16, 256, 128]
+
+    class TestXPUInstanceNormOp5(XPUTestInstanceNormOp):
+
+        def set_attrs(self):
+            self.shape = [10, 3, 512, 1]
+
+
+support_types = get_xpu_op_support_types('instance_norm')
+for stype in support_types:
+    create_test_class(globals(), XPUTestInstanceNormOp, stype)
+
+if __name__ == "__main__":
+    unittest.main()
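The same kernels also back the imperative API. A hedged dygraph check, again assuming an XPU build with a visible device (not part of the patch):

    # Hedged sketch: paddle.nn.InstanceNorm2D carries learnable scale and
    # bias, matching the op's Scale/Bias inputs exercised by the test above.
    import paddle

    paddle.disable_static()
    paddle.set_device('xpu:0')
    x = paddle.rand([2, 3, 4, 5])
    layer = paddle.nn.InstanceNorm2D(num_features=3, epsilon=1e-5)
    y = layer(x)
    print(y.shape)  # [2, 3, 4, 5]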
"__main__": + unittest.main() -- GitLab