/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/inplace_abn_op.h"
#include "paddle/fluid/operators/sync_batch_norm_op.cu.h"

namespace paddle {
namespace operators {

// Forward kernel for in-place activated batch normalization (ABN):
// runs (sync) batch norm, then applies the activation in place on Y.
// In inplace mode, X and Y must alias the same tensor buffer.
template <typename DeviceContext, typename T>
class InplaceABNKernel
    : public paddle::operators::SyncBatchNormKernel<DeviceContext, T>,
      public paddle::operators::BatchNormKernel<DeviceContext, T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* y = ctx.Output<Tensor>("Y");
    auto* x = ctx.Input<Tensor>("X");
    PADDLE_ENFORCE_EQ(x, y,
                      platform::errors::InvalidArgument(
                          "X and Y must be the same tensor in inplace mode"));
    auto activation =
        GetInplaceABNActivationType(ctx.Attr<std::string>("activation"));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();

    // Dispatch to the synchronized (multi-device) or plain batch-norm kernel.
    if (ctx.Attr<bool>("use_sync_bn")) {
      SyncBatchNormKernel<DeviceContext, T>::Compute(ctx);
    } else {
      BatchNormKernel<DeviceContext, T>::Compute(ctx);
    }

    // Apply the activation in place on the normalized output.
    auto cur_y = EigenVector<T>::Flatten(*y);
    InplaceABNActivation<DeviceContext, T> functor;
    functor.Compute(ctx, activation, place, cur_y, cur_y);
  }
};

// Deriving the gradient for the backward pass of batch normalization:
// https://kevinzakka.github.io/2016/09/14/batch_normalization/
template <typename DeviceContext, typename T>
class InplaceABNGradKernel
    : public paddle::operators::SyncBatchNormGradKernel<DeviceContext, T>,
      public paddle::operators::BatchNormGradKernel<DeviceContext, T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* y = ctx.Input<Tensor>("Y");
    auto* d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
    auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    PADDLE_ENFORCE_EQ(
        d_x, d_y,
        platform::errors::InvalidArgument(
            "X@GRAD and Y@GRAD must be the same tensor in inplace mode"));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto activation =
        GetInplaceABNActivationType(ctx.Attr<std::string>("activation"));

    // First back-propagate through the activation in place: this rewrites
    // d_y with the gradient w.r.t. the pre-activation output, recovered from
    // the saved post-activation values in y (the forward pass overwrote the
    // pre-activation tensor, so only y is available).
    auto py = *y;
    auto pd_y = *d_y;
    auto cur_y = EigenVector<T>::Flatten(py);
    auto cur_dy = EigenVector<T>::Flatten(pd_y);
    InplaceABNActivation<DeviceContext, T> functor;
    functor.GradCompute(ctx, activation, place, cur_y, cur_y, cur_dy, cur_dy);

    // Then back-propagate through (sync) batch normalization itself.
    if (ctx.Attr<bool>("use_sync_bn")) {
      SyncBatchNormGradKernel<DeviceContext, T>::Compute(ctx);
    } else {
      BatchNormGradKernel<DeviceContext, T>::Compute(ctx);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(inplace_abn,
                        ops::InplaceABNKernel<plat::CUDADeviceContext, float>,
                        ops::InplaceABNKernel<plat::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    inplace_abn_grad,
    ops::InplaceABNGradKernel<plat::CUDADeviceContext, float>,
    ops::InplaceABNGradKernel<plat::CUDADeviceContext, double>);