Unverified commit ac47d003, authored by Weilong Wu, committed by GitHub

add sigmoid custom grad for prim (#51768)

Parent 52e1742f
...@@ -504,6 +504,13 @@ void exp_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
  }
}

template <typename T>
void sigmoid_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
  if (x_grad) {
    set_output<T>(out_grad * (out * (1 - out)), x_grad);
  }
}

template <typename T>
void abs_grad(const Tensor& x, const Tensor& out_grad, Tensor* x_grad) {
  if (x_grad) {
......
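The rule above is the chain rule applied to the standard sigmoid derivative, sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), rewritten in terms of the saved forward output `out`: x_grad = out_grad * out * (1 - out). As a quick sanity check (a NumPy sketch for illustration only, not part of this commit), the composite rule can be compared against a central finite difference:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.random.rand(10, 10)
out = sigmoid(x)
out_grad = np.random.rand(10, 10)

# Composite rule from the diff: x_grad = out_grad * (out * (1 - out))
x_grad = out_grad * (out * (1.0 - out))

# Central finite-difference estimate of d(sigmoid)/dx, chained with out_grad
eps = 1e-6
x_grad_fd = out_grad * (sigmoid(x + eps) - sigmoid(x - eps)) / (2.0 * eps)

np.testing.assert_allclose(x_grad, x_grad_fd, rtol=1e-5, atol=1e-8)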
...@@ -1298,6 +1298,7 @@
  func : sigmoid_grad
  backward : sigmoid_double_grad
  inplace : (out_grad -> x_grad)
  composite : sigmoid_grad(out, out_grad, x_grad)

- backward_op : sigmoid_triple_grad
  forward : sigmoid_double_grad (Tensor out, Tensor fwd_grad_out, Tensor grad_grad_x) -> Tensor(grad_out), Tensor(grad_grad_out)
......
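The new `composite` entry registers the C++ rule above as the decomposed (prim) backward for `sigmoid`: when composite backward is enabled, gradient construction expands the op into primitive operations via `sigmoid_grad(out, out_grad, x_grad)` instead of dispatching to the handwritten kernel; otherwise behavior is unchanged. The two new tests below compare exactly these two paths using the toggles shown here (taken verbatim from the tests; they are internal switches, not public API):

from paddle.fluid import core

# Eager (dygraph) mode: route sigmoid's backward through the composite rule.
core.set_prim_eager_enabled(True)

# Static mode: decompose backward ops into primitives when building gradients.
core._set_prim_backward_enabled(True)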
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import parameterized as param
import paddle
import paddle.nn.functional as F
from paddle.fluid import core


@param.parameterized_class(
    ('primal', 'cotangent', 'dtype'),
    [
        (np.random.rand(10, 10), np.random.rand(10, 10), np.float32),
    ],
)
class TestSigmoidGradComp(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        core.set_prim_eager_enabled(True)
        cls.primal = cls.primal.astype(cls.dtype)
        if cls.cotangent is not None:
            cls.cotangent = cls.cotangent.astype(cls.dtype)

    def setUp(self):
        paddle.enable_static()

    def tearDown(self):
        paddle.disable_static()

    def test_sigmoid_grad_comp(self):
        def actual(primal, cotangent):
            core.set_prim_eager_enabled(True)
            paddle.disable_static()
            x = paddle.to_tensor(primal)
            dout = paddle.to_tensor(cotangent)
            x.stop_gradient = False
            return paddle.grad(F.sigmoid(x), x, dout)[0]

        def desired(primal, cotangent):
            core.set_prim_eager_enabled(False)
            paddle.disable_static()
            x = paddle.to_tensor(primal)
            dout = paddle.to_tensor(cotangent)
            x.stop_gradient = False
            return paddle.grad(F.sigmoid(x), x, dout)[0]

        np.testing.assert_allclose(
            actual=actual(self.primal, self.cotangent),
            desired=desired(self.primal, self.cotangent),
            rtol=1e-6,
            atol=0,
        )


if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import parameterized as param
import paddle
import paddle.nn.functional as F
from paddle.fluid import core


@param.parameterized_class(
    ('primal', 'cotangent', 'dtype'),
    [
        (np.random.rand(10, 10), np.random.rand(10, 10), np.float32),
    ],
)
class TestSigmoidGradComp(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        core.set_prim_eager_enabled(True)
        cls.primal = cls.primal.astype(cls.dtype)
        if cls.cotangent is not None:
            cls.cotangent = cls.cotangent.astype(cls.dtype)

    def setUp(self):
        paddle.enable_static()

    def tearDown(self):
        paddle.disable_static()

    def test_sigmoid_grad_comp(self):
        def actual(primal, cotangent):
            core._set_prim_backward_enabled(True)
            paddle.enable_static()
            mp, sp = paddle.static.Program(), paddle.static.Program()
            with paddle.static.program_guard(mp, sp):
                x = paddle.static.data('primal', primal.shape, primal.dtype)
                dout = paddle.static.data(
                    'cotangent', cotangent.shape, cotangent.dtype
                )
                x.stop_gradient = False
                res = F.sigmoid(x)
                x_grad = paddle.static.gradients(res, [x], dout)
            exe = paddle.static.Executor()
            exe.run(sp)
            out = exe.run(
                program=mp,
                feed={
                    'primal': primal,
                    'cotangent': cotangent,
                },
                fetch_list=[
                    x_grad[0].name,
                ],
            )
            return out[0]

        def desired(primal, cotangent):
            core._set_prim_backward_enabled(False)
            paddle.enable_static()
            mp, sp = paddle.static.Program(), paddle.static.Program()
            with paddle.static.program_guard(mp, sp):
                x = paddle.static.data('primal', primal.shape, primal.dtype)
                dout = paddle.static.data(
                    'cotangent', cotangent.shape, cotangent.dtype
                )
                x.stop_gradient = False
                res = F.sigmoid(x)
                x_grad = paddle.static.gradients(res, [x], dout)
            exe = paddle.static.Executor()
            exe.run(sp)
            out = exe.run(
                program=mp,
                feed={
                    'primal': primal,
                    'cotangent': cotangent,
                },
                fetch_list=[
                    x_grad[0].name,
                ],
            )
            return out[0]

        np.testing.assert_allclose(
            actual=actual(self.primal, self.cotangent),
            desired=desired(self.primal, self.cotangent),
            rtol=1e-6,
            atol=0,
        )


if __name__ == '__main__':
    unittest.main()