Unverified commit 00a50af8, authored by Tao Luo and committed by GitHub

refine amax/amin example (#38525)

Parent 1fb80a6a
...@@ -1802,11 +1802,24 @@ def amax(x, axis=None, keepdim=False, name=None):
x = paddle.to_tensor([[0.1, 0.9, 0.9, 0.9],
                      [0.9, 0.9, 0.6, 0.7]],
                     dtype='float64', stop_gradient=False)
# There are 5 maximum elements:
# 1) amax evenly distributes the gradient among these equal values,
#    so each corresponding gradient is 1/5 = 0.2;
# 2) max propagates the full gradient to all of them,
#    so each corresponding gradient is 1.
result1 = paddle.amax(x)
result1.backward()
print(result1, x.grad)
#[0.9], [[0., 0.2, 0.2, 0.2], [0.2, 0.2, 0., 0.]]
x.clear_grad()
result1_max = paddle.max(x)
result1_max.backward()
print(result1_max, x.grad)
#[0.9], [[0., 1.0, 1.0, 1.0], [1.0, 1.0, 0., 0.]]
###############################
x.clear_grad()
result2 = paddle.amax(x, axis=0)
result2.backward()
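For quick reference, here is a self-contained sketch of the behavior the new amax example documents, runnable outside the docstring (this assumes a Paddle 2.x dynamic-graph session; the tensor values mirror the hunk above):

import paddle

x = paddle.to_tensor([[0.1, 0.9, 0.9, 0.9],
                      [0.9, 0.9, 0.6, 0.7]],
                     dtype='float64', stop_gradient=False)

# amax splits the gradient evenly across the 5 tied maxima: 0.2 each
result_amax = paddle.amax(x)
result_amax.backward()
print(result_amax, x.grad)

x.clear_grad()

# max propagates a full gradient of 1.0 to every tied maximum
result_max = paddle.max(x)
result_max.backward()
print(result_max, x.grad)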
...@@ -1901,11 +1914,24 @@ def amin(x, axis=None, keepdim=False, name=None):
x = paddle.to_tensor([[0.2, 0.1, 0.1, 0.1],
                      [0.1, 0.1, 0.6, 0.7]],
                     dtype='float64', stop_gradient=False)
# There are 5 minimum elements:
# 1) amin evenly distributes the gradient among these equal values,
#    so each corresponding gradient is 1/5 = 0.2;
# 2) min propagates the full gradient to all of them,
#    so each corresponding gradient is 1.
result1 = paddle.amin(x)
result1.backward()
print(result1, x.grad)
#[0.1], [[0., 0.2, 0.2, 0.2], [0.2, 0.2, 0., 0.]]
x.clear_grad()
result1_min = paddle.min(x)
result1_min.backward()
print(result1_min, x.grad)
#[0.1], [[0., 1.0, 1.0, 1.0], [1.0, 1.0, 0., 0.]]
###############################
x.clear_grad()
result2 = paddle.amin(x, axis=0)
result2.backward()
...
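Similarly, a minimal standalone sketch for the amin hunk, extended with the axis=0 reduction that the diff truncates. The per-column gradients in the comments follow from applying the same even-split rule within each column; they are worked out for this sketch, not output copied from the commit:

import paddle

x = paddle.to_tensor([[0.2, 0.1, 0.1, 0.1],
                      [0.1, 0.1, 0.6, 0.7]],
                     dtype='float64', stop_gradient=False)

# min propagates a full gradient of 1.0 to every tied minimum
result_min = paddle.min(x)
result_min.backward()
print(result_min, x.grad)
# expect [0.1], [[0., 1., 1., 1.], [1., 1., 0., 0.]]

x.clear_grad()

# Per-column reduction: amin splits ties within each column, so column 1
# (two 0.1 entries) should get 0.5 each, while unique minima get the full 1.0
result_axis = paddle.amin(x, axis=0)
result_axis.backward()
print(result_axis, x.grad)
# expect [0.1, 0.1, 0.1, 0.1], [[0., 0.5, 1., 1.], [1., 0.5, 0., 0.]]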