Unverified commit cafa35e1, authored by chentianyu03, committed by GitHub

Change reduce mean (#27997) (#28004)

* change paddle.fluid.layers.reduce_mean to paddle.mean

* change paddle.fluid.layers.reduce_mean to paddle.mean
Parent 3251f9c1
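The commit swaps the deprecated fluid reduce op for the Paddle 2.x API; besides the function name, the keyword arguments change from dim/keep_dim to axis/keepdim. A minimal sketch of the equivalent calls (the tensor x below is only a placeholder, assuming Paddle 2.0+):

    import paddle

    x = paddle.rand([4, 3])  # placeholder input tensor

    # before: paddle.fluid.layers.reduce_mean(x, dim=[0], keep_dim=False)
    # after:  paddle.mean with the 2.x keyword names
    mean_all = paddle.mean(x)                          # mean over all elements
    mean_ax0 = paddle.mean(x, axis=[0])                # mean over axis 0, shape [3]
    mean_keep = paddle.mean(x, axis=1, keepdim=True)   # keep reduced axis, shape [4, 1]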
@@ -56,7 +56,7 @@ class GradScaler(AmpScaler):
     data = paddle.rand([10, 3, 32, 32])
     with paddle.amp.auto_cast():
         conv = model(data)
-        loss = paddle.fluid.layers.reduce_mean(conv)
+        loss = paddle.mean(conv)
     scaled = scaler.scale(loss)  # scale the loss
     scaled.backward()  # do backward
     scaler.minimize(optimizer, scaled)  # update parameters
@@ -96,7 +96,7 @@ class GradScaler(AmpScaler):
     data = paddle.rand([10, 3, 32, 32])
     with paddle.amp.auto_cast():
         conv = model(data)
-        loss = paddle.fluid.layers.reduce_mean(conv)
+        loss = paddle.mean(conv)
     scaled = scaler.scale(loss)  # scale the loss
     scaled.backward()  # do backward
     scaler.minimize(optimizer, scaled)  # update parameters
@@ -128,7 +128,7 @@ class GradScaler(AmpScaler):
     data = paddle.rand([10, 3, 32, 32])
     with paddle.amp.auto_cast():
         conv = model(data)
-        loss = paddle.fluid.layers.reduce_mean(conv)
+        loss = paddle.mean(conv)
     scaled = scaler.scale(loss)  # scale the loss
     scaled.backward()  # do backward
     scaler.minimize(optimizer, scaled)  # update parameters
......
@@ -105,7 +105,7 @@ class ReduceMeanLayer(object):
     """
     operation
     """
-    mean = paddle.fluid.layers.reduce_mean(input)
+    mean = paddle.mean(input)
     return mean
......
@@ -187,7 +187,7 @@ class PtbModel(paddle.nn.Layer):
     loss = paddle.nn.functional.softmax_with_cross_entropy(
         logits=projection, label=label, soft_label=False)
     loss = paddle.reshape(loss, shape=[-1, self.num_steps])
-    loss = paddle.fluid.layers.reduce_mean(loss, dim=[0])
+    loss = paddle.mean(loss, axis=[0])
     loss = paddle.fluid.layers.reduce_sum(loss)
     return loss, last_hidden, last_cell
......
@@ -414,7 +414,7 @@ class TestLRScheduler(unittest.TestCase):
     for batch_id in range(2):
         x = paddle.to_tensor(x)
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         adam.step()
         adam.clear_grad()
......
@@ -73,8 +73,8 @@ class TestRetainGraph(unittest.TestCase):
     fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
     disc_interpolates = netD(fake_AB)
-    outs = paddle.fluid.layers.fill_constant(disc_interpolates.shape,
-                                             disc_interpolates.dtype, 1.0)
+    outs = paddle.fluid.layers.fill_constant(
+        disc_interpolates.shape, disc_interpolates.dtype, 1.0)
     gradients = paddle.grad(
         outputs=disc_interpolates,
         inputs=fake_AB,
@@ -85,9 +85,9 @@ class TestRetainGraph(unittest.TestCase):
     gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])
-    gradient_penalty = paddle.fluid.layers.reduce_mean((paddle.norm(
-        gradients + 1e-16, 2, 1) - constant)**
-                                                       2) * lambda_gp  # added eps
+    gradient_penalty = paddle.mean((paddle.norm(gradients + 1e-16, 2, 1)
+                                    - constant)**
+                                   2) * lambda_gp  # added eps
     return gradient_penalty, gradients
 else:
     return 0.0, None
@@ -113,7 +113,8 @@ class TestRetainGraph(unittest.TestCase):
     fake_AB = paddle.concat((realA, fakeB), 1)
     G_pred_fake = d(fake_AB.detach())
-    false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 0.0)
+    false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape,
+                                                     'float32', 0.0)
     G_gradient_penalty, _ = self.cal_gradient_penalty(
         d, realA, fakeB, lambda_gp=10.0)
@@ -125,7 +126,8 @@ class TestRetainGraph(unittest.TestCase):
     optim_g.clear_gradients()
     fake_AB = paddle.concat((realA, fakeB), 1)
     G_pred_fake = d(fake_AB)
-    true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 1.0)
+    true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape,
+                                                    'float32', 1.0)
     loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,
                                                         true_target)
......
@@ -229,7 +229,7 @@ class NoamDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -325,7 +325,7 @@ class PiecewiseDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -407,7 +407,7 @@ class NaturalExpDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -485,7 +485,7 @@ class InverseTimeDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -580,7 +580,7 @@ class PolynomialDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -695,7 +695,7 @@ class LinearWarmup(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -798,7 +798,7 @@ class ExponentialDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -885,7 +885,7 @@ class MultiStepDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -992,7 +992,7 @@ class StepDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -1086,7 +1086,7 @@ class LambdaDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -1184,7 +1184,7 @@ class ReduceOnPlateau(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
@@ -1390,7 +1390,7 @@ class CosineAnnealingDecay(LRScheduler):
     for batch_id in range(2):
         x = paddle.uniform([10, 10])
         out = linear(x)
-        loss = paddle.fluid.layers.reduce_mean(out)
+        loss = paddle.mean(out)
         loss.backward()
         sgd.step()
         sgd.clear_gradients()
......
@@ -377,7 +377,7 @@ class Optimizer(object):
     linear = paddle.nn.Linear(10, 10)
     inp = paddle.to_tensor(inp)
     out = linear(inp)
-    loss = paddle.fluid.layers.reduce_mean(out)
+    loss = paddle.mean(out)
     bd = [2, 4, 6, 8]
     value = [0.2, 0.4, 0.6, 0.8, 1.0]
......