diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 34c848ac98266321d8a95ecfe187714c085d7dc7..316fb00eb9969a9fc3fcdda9b186ea72a34c0369 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -1073,8 +1073,8 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
     auto temp1 =
-        static_cast<T>(alpha) * (out < static_cast<T>(0)).template cast<T>();
-    auto temp2 = (out >= static_cast<T>(0)).template cast<T>();
+        static_cast<T>(alpha) * (out <= static_cast<T>(0)).template cast<T>();
+    auto temp2 = (out > static_cast<T>(0)).template cast<T>();
     dx.device(d) = dout * (temp1 + temp2).template cast<T>();
   }
 
@@ -1418,11 +1418,11 @@ struct LeakyReluGradGradFunctor : public BaseActivationFunctor<T> {
       auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
       auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
       auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
-      ddout.device(*d) =
-          ddx *
-          ((out >= static_cast<T>(0)).template cast<T>() +
-           static_cast<T>(alpha) * (out < static_cast<T>(0)).template cast<T>())
-              .template cast<T>();
+      ddout.device(*d) = ddx *
+                         ((out > static_cast<T>(0)).template cast<T>() +
+                          static_cast<T>(alpha) *
+                              (out <= static_cast<T>(0)).template cast<T>())
+                             .template cast<T>();
     }
   }
   static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
diff --git a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cc b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cc
index 77e74e3f81ce7205164681b6459e7a327519eecc..9a06a9a27620300081c7b4847e6b1a91cd08515d 100644
--- a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cc
+++ b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cc
@@ -22,5 +22,10 @@ TEST(leaky_relu_grad_grad, test_cpu) {
   ASSERT_TRUE(
       TestLeakyReluGradGradMain<float>({32, 64}, platform::CPUPlace(), 0.02));
 }
 
+TEST(leaky_relu_grad_grad, test_cpu_zero_alpha) {
+  ASSERT_TRUE(
+      TestLeakyReluGradGradMain<float>({32, 64}, platform::CPUPlace(), 0.0));
+}
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cu b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cu
index bb1afaea621ea35c3a239d35c0150f2e33684542..6f0f840b8c5d4ddda4c3fc5e8a525905cbce4850 100644
--- a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cu
+++ b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.cu
@@ -22,5 +22,10 @@ TEST(leaky_relu_grad_grad, test_gpu) {
   ASSERT_TRUE(
       TestLeakyReluGradGradMain<float>({32, 64}, platform::CUDAPlace(0), 0.15));
 }
 
+TEST(leaky_relu_grad_grad, test_gpu_zero_alpha) {
+  ASSERT_TRUE(
+      TestLeakyReluGradGradMain<float>({32, 64}, platform::CUDAPlace(0), 0.0));
+}
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.h b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.h
index fe9bf969b1de3893712c3f90715aac303a1d6ef5..f416aa6e00f5a4a82c2562c36f9d32bb1a6843aa 100644
--- a/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.h
+++ b/paddle/fluid/operators/test_leaky_relu_grad_grad_functor.h
@@ -46,7 +46,7 @@ struct LeakyReluGradGradEachElementFunctor {
       : ddx_(ddx), out_(out), alpha_(alpha), ddout_(ddout) {}
 
   HOSTDEVICE void operator()(int idx) {
-    if (out_[idx] >= 0) {
+    if (out_[idx] > 0) {
       ddout_[idx] = ddx_[idx];
     } else {
       ddout_[idx] = ddx_[idx] * alpha_;
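
Note on why the boundary moves from `>=`/`<` to `>`/`<=`: these functors reconstruct the LeakyReLU gradient from the forward *output* `out` rather than the input `x`. When `alpha == 0`, the forward pass maps every `x <= 0` to `out == 0`, so the old `out >= 0` predicate assigned those elements a gradient of 1 instead of `alpha`. Below is a minimal standalone sketch of that failure mode; `leaky_relu`, `grad_from_out_old`, and `grad_from_out_new` are illustrative helpers written for this note, not PaddlePaddle APIs or part of the patch.

```cpp
#include <cassert>

// Forward pass: out = x for x > 0, alpha * x otherwise.
double leaky_relu(double x, double alpha) { return x > 0 ? x : alpha * x; }

// Gradient recovered from the forward output, as the functors in the
// diff do. Pre-patch boundary: out == 0 takes the positive branch.
double grad_from_out_old(double out, double alpha) {
  return out >= 0 ? 1.0 : alpha;
}

// Post-patch boundary: out == 0 falls into the alpha branch.
double grad_from_out_new(double out, double alpha) {
  return out > 0 ? 1.0 : alpha;
}

int main() {
  const double alpha = 0.0;  // the degenerate case the new tests cover
  const double x = -2.0;
  const double out = leaky_relu(x, alpha);  // out == 0 although x < 0

  assert(grad_from_out_old(out, alpha) == 1.0);    // wrong: true gradient is alpha
  assert(grad_from_out_new(out, alpha) == alpha);  // correct
  return 0;
}
```

The `test_cpu_zero_alpha` and `test_gpu_zero_alpha` cases added in the diff exercise exactly this degenerate input on both places.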