/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_op_function.h"
#include "paddle/operators/layer_norm_op.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;

namespace {

// Element-wise (a - b)^2; used to accumulate the variance numerator.
template <typename T>
struct SubAndSquareFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const { return (a - b) * (a - b); }
};

// Element-wise a / (sqrt(b) + epsilon); divides a centered value by the
// (epsilon-stabilized) standard deviation.
template <typename T>
struct DivAndSqrtFunctor {
  explicit DivAndSqrtFunctor(T epsilon) { epsilon_ = epsilon; }
  inline HOSTDEVICE T operator()(T a, T b) const {
    return a / (sqrt(b) + epsilon_);
  }

 private:
  T epsilon_;
};

template <typename T>
struct MulFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const { return a * b; }
};

template <typename T>
struct AddFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const { return a + b; }
};

template <typename T>
struct SubFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const { return a - b; }
};

// Element-wise a * rsqrt(b); NOTE(review): not referenced by the kernels
// below in this file — kept for interface compatibility.
template <typename T>
struct MulInvVarFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const {
    return a * std::sqrt(1.0 / b);
  }
};

}  // namespace

// Forward layer normalization.
//
// Flattens X to a 2-D matrix [left, right] at `begin_norm_axis`, computes the
// per-row mean and variance, normalizes each row, then applies the optional
// per-column Scale and Bias. Outputs: Y (normalized, reshaped back to the
// input dims), Mean and Variance (one value per row).
template <typename DeviceContext, typename T>
class LayerNormCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const float epsilon = ctx.Attr<float>("epsilon");
    auto *scale = ctx.Input<Tensor>("Scale");  // optional, may be null
    auto *bias = ctx.Input<Tensor>("Bias");    // optional, may be null
    // Copy the Tensor header (not the data) so we can Resize it locally
    // without mutating the graph's view of X.
    auto x = *ctx.Input<Tensor>("X");

    auto *y = ctx.Output<Tensor>("Y");
    auto *mean = ctx.Output<Tensor>("Mean");
    auto *var = ctx.Output<Tensor>("Variance");
    const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");

    const auto &x_dims = x.dims();

    y->mutable_data<T>(ctx.GetPlace());
    mean->mutable_data<T>(ctx.GetPlace());
    var->mutable_data<T>(ctx.GetPlace());

    auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
    int left = static_cast<int>(matrix_dim[0]);
    int right = static_cast<int>(matrix_dim[1]);
    framework::DDim matrix_shape({left, right});

    x.Resize(matrix_shape);
    y->Resize(matrix_shape);

    auto &dev_ctx = ctx.template device_context<DeviceContext>();
    math::RowwiseMean<DeviceContext, T> row_mean;

    // functor-> get mean
    row_mean(dev_ctx, x, mean);

    // functor-> get variance: mean of (x - mean)^2, staged through y.
    ElementwiseComputeEx<SubAndSquareFunctor<T>, DeviceContext, T>(
        ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor<T>(), y);
    row_mean(dev_ctx, *y, var);

    // functor-> get norm_out: (x - mean) / (sqrt(var) + epsilon)
    ElementwiseComputeEx<SubFunctor<T>, DeviceContext, T>(
        ctx, &x, mean, /*axis*/ 0, SubFunctor<T>(), y);
    ElementwiseComputeEx<DivAndSqrtFunctor<T>, DeviceContext, T>(
        ctx, y, var, /*axis*/ 0, DivAndSqrtFunctor<T>(static_cast<T>(epsilon)),
        y);

    framework::DDim scale_shape({right});
    if (scale) {
      Tensor scale_matrix = *scale;
      scale_matrix.Resize(scale_shape);
      ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
          ctx, y, &scale_matrix, /*axis*/ 1, MulFunctor<T>(), y);
    }
    if (bias) {
      Tensor bias_matrix = *bias;
      bias_matrix.Resize(scale_shape);
      ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(
          ctx, y, &bias_matrix, /*axis*/ 1, AddFunctor<T>(), y);
    }
    // Restore the caller-visible shape of Y.
    y->Resize(x_dims);
  }
};

// Backward layer normalization.
//
// Given dY (and the saved Mean/Variance from the forward pass), produces any
// of d_X, d_Scale, d_Bias that the graph requests (null outputs are skipped):
//   d_Bias  = colwise_sum(dY)
//   d_Scale = colwise_sum(x_norm * dY)
//   d_X     = (dY*scale - rowmean(dY*scale)
//              - x_norm * rowmean(dY*scale * x_norm)) / (sqrt(var)+epsilon)
// where x_norm = (x - mean) / (sqrt(var) + epsilon).
template <typename DeviceContext, typename T>
class LayerNormCUDAGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const float epsilon = ctx.Attr<float>("epsilon");
    // Header copies so local Resize calls don't affect the graph's tensors.
    auto x = *ctx.Input<Tensor>("X");
    auto mean = *ctx.Input<Tensor>("Mean");
    auto var = *ctx.Input<Tensor>("Variance");
    auto scale = *ctx.Input<Tensor>("Scale");
    auto d_y = *ctx.Input<Tensor>(framework::GradVarName("Y"));
    const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");

    // init output (any of these may be null if the gradient is not needed)
    auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
    auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    const auto &x_dims = x.dims();
    auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
    int left = static_cast<int>(matrix_dim[0]);
    int right = static_cast<int>(matrix_dim[1]);
    framework::DDim matrix_shape({left, right});

    d_y.Resize(matrix_shape);
    auto &dev_ctx = ctx.template device_context<DeviceContext>();
    math::ColwiseSum<DeviceContext, T> colwise_sum;

    Tensor temp;
    Tensor temp_norm;
    if (d_scale || d_x) {
      x.Resize(matrix_shape);
      temp.mutable_data<T>(matrix_shape, ctx.GetPlace());
      temp_norm.mutable_data<T>(matrix_shape, ctx.GetPlace());

      // get x_norm = (x - mean) / (sqrt(var) + epsilon)
      ElementwiseComputeEx<SubFunctor<T>, DeviceContext, T>(
          ctx, &x, &mean, /*axis*/ 0, SubFunctor<T>(), &temp_norm);
      ElementwiseComputeEx<DivAndSqrtFunctor<T>, DeviceContext, T>(
          ctx, &temp_norm, &var, /*axis*/ 0,
          DivAndSqrtFunctor<T>(static_cast<T>(epsilon)), &temp_norm);
    }

    if (d_bias) {
      d_bias->mutable_data<T>(ctx.GetPlace());
      colwise_sum(dev_ctx, d_y, d_bias);
    }
    if (d_scale) {
      d_scale->mutable_data<T>(ctx.GetPlace());
      ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
          ctx, &temp_norm, &d_y, /*axis*/ 0, MulFunctor<T>(), &temp);
      colwise_sum(dev_ctx, temp, d_scale);
    }

    if (d_x) {
      framework::DDim vec_shape({left});
      d_x->mutable_data<T>(ctx.GetPlace());
      Tensor temp_vec;
      temp_vec.mutable_data<T>(vec_shape, ctx.GetPlace());

      auto &dev_ctx = ctx.template device_context<DeviceContext>();
      math::RowwiseMean<DeviceContext, T> row_mean;

      if (d_scale) {
        // dy_dx: temp = dY * scale (the scaled upstream gradient)
        ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
            ctx, &d_y, &scale, /*axis*/ 1, MulFunctor<T>(), &temp);
        framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x);

        // dy_dmean_dx: subtract the per-row mean of temp
        row_mean(dev_ctx, temp, &temp_vec);
        ElementwiseComputeEx<SubFunctor<T>, DeviceContext, T>(
            ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor<T>(), d_x);

        // dy_var_dx: temp = temp * x_norm
        ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
            ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor<T>(), &temp);
      } else {
        // dy_dx (no Scale: the scaled gradient is dY itself)
        framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x);

        // dy_dmean_dx
        row_mean(dev_ctx, d_y, &temp_vec);
        ElementwiseComputeEx<SubFunctor<T>, DeviceContext, T>(
            ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor<T>(), d_x);

        // dy_var_dx
        ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
            ctx, &d_y, &temp_norm, /*axis*/ 0, MulFunctor<T>(), &temp);
      }

      // dy_var_dx: d_x -= x_norm * rowmean(temp)
      row_mean(dev_ctx, temp, &temp_vec);
      ElementwiseComputeEx<MulFunctor<T>, DeviceContext, T>(
          ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor<T>(), &temp_norm);
      ElementwiseComputeEx<SubFunctor<T>, DeviceContext, T>(
          ctx, d_x, &temp_norm, /*axis*/ 0, SubFunctor<T>(), d_x);

      // Final division by (sqrt(var) + epsilon).
      ElementwiseComputeEx<DivAndSqrtFunctor<T>, DeviceContext, T>(
          ctx, d_x, &var, /*axis*/ 0,
          DivAndSqrtFunctor<T>(static_cast<T>(epsilon)), d_x);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    layer_norm,
    ops::LayerNormCUDAKernel<paddle::platform::CUDADeviceContext, float>,
    ops::LayerNormCUDAKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    layer_norm_grad,
    ops::LayerNormCUDAGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::LayerNormCUDAGradKernel<paddle::platform::CUDADeviceContext, double>);