Unverified commit ca9339eb, authored by zhiboniu, committed by GitHub

Replace fluid.mean with paddle.mean (#43907)

* change fluid.mean to paddle.mean

* revert some old code examples
Parent: 5fbc26e2
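The change itself is mechanical: every spelling of the legacy mean op (`fluid.layers.mean`, `layers.mean`, `paddle.fluid.layers.mean`) becomes `paddle.mean`, and `import paddle` is added to files that lacked it. The swap preserves behavior because both APIs reduce over all elements by default. A minimal sketch of the pattern, assuming Paddle 2.x:

```python
import paddle

x = paddle.rand([4, 3])

# Legacy spelling removed by this PR:
#   avg = fluid.layers.mean(x)
# New spelling; paddle.mean reduces over all elements by default,
# so the one-line swap keeps the same result.
avg = paddle.mean(x)

# The keyword form seen in many hunks also works, because the first
# parameter of paddle.mean is named `x`.
avg_kw = paddle.mean(x=x)

# Unlike the legacy op, paddle.mean can additionally reduce along an axis.
row_avg = paddle.mean(x, axis=1)
```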
@@ -68,7 +68,7 @@ def train_lenet(lenet, reader, optimizer):
 out = lenet(img)
 loss = fluid.layers.cross_entropy(out, label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 avg_loss.backward()
 optimizer.minimize(avg_loss)
......
@@ -46,7 +46,7 @@ def conv_block():
 act="relu")
 prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return [img, label], avg_loss
......
@@ -118,7 +118,7 @@ class TestImperativeQat(unittest.TestCase):
 out = lenet(img)
 acc = fluid.layers.accuracy(out, label)
 loss = fluid.layers.cross_entropy(out, label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 avg_loss.backward()
 adam.minimize(avg_loss)
 lenet.clear_gradients()
......
@@ -115,7 +115,7 @@ class TestImperativeQatAmp(unittest.TestCase):
 out = model(img)
 acc = fluid.layers.accuracy(out, label)
 loss = fluid.layers.cross_entropy(out, label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 scaled_loss = scaler.scale(avg_loss)
 scaled_loss.backward()
@@ -125,7 +125,7 @@ class TestImperativeQatAmp(unittest.TestCase):
 out = model(img)
 acc = fluid.layers.accuracy(out, label)
 loss = fluid.layers.cross_entropy(out, label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 avg_loss.backward()
 adam.minimize(avg_loss)
......
@@ -45,7 +45,7 @@ def conv_net(img, label):
 act="relu")
 prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
......
@@ -41,7 +41,7 @@ def linear_fc(num):
 for _ in six.moves.xrange(num):
 hidden = fluid.layers.fc(hidden, size=128, act='relu')
 loss = fluid.layers.cross_entropy(input=hidden, label=label)
-loss = fluid.layers.mean(loss)
+loss = paddle.mean(loss)
 return loss
@@ -92,7 +92,7 @@ def residual_block(num, quant_skip_pattern=None):
 pool_stride=2)
 fc = fluid.layers.fc(input=pool, size=10)
 loss = fluid.layers.cross_entropy(input=fc, label=label)
-loss = fluid.layers.mean(loss)
+loss = paddle.mean(loss)
 return loss
@@ -116,7 +116,7 @@ def conv_net(img, label, quant_skip_pattern):
 with fluid.name_scope(quant_skip_pattern):
 prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
@@ -620,7 +620,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
 pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
 fc = fluid.layers.fc(input=pool_add, size=10)
 loss = fluid.layers.cross_entropy(input=fc, label=label)
-loss = fluid.layers.mean(loss)
+loss = paddle.mean(loss)
 return loss
......
@@ -53,7 +53,7 @@ def conv_net(img, label):
 hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
 prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
......
@@ -48,7 +48,7 @@ def conv_net(img, label):
 hidden = fluid.layers.fc(input=conv_pool_1, size=100, act='relu')
 prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
......
@@ -55,7 +55,7 @@ def conv_net(img, label):
 hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
 prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
......
@@ -136,7 +136,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
 logits = fluid.layers.fc(input=net, size=classdim, act="softmax")
 cost, predict = fluid.layers.softmax_with_cross_entropy(
 logits, label, return_softmax=True)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 acc = fluid.layers.accuracy(input=predict, label=label)
 # Test program
@@ -460,7 +460,7 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase):
 logits = fluid.layers.fc(input=net, size=10, act="softmax")
 cost, predict = fluid.layers.softmax_with_cross_entropy(
 logits, label, return_softmax=True)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 optimizer = fluid.optimizer.Lamb(learning_rate=0.001)
 amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
......
@@ -32,7 +32,7 @@ def linear_fc(num):
 for _ in six.moves.xrange(num):
 hidden = fluid.layers.fc(hidden, size=128, act='relu')
 loss = fluid.layers.cross_entropy(input=hidden, label=label)
-loss = fluid.layers.mean(loss)
+loss = paddle.mean(loss)
 return loss
@@ -63,7 +63,7 @@ def residual_block(num):
 hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
 fc = fluid.layers.fc(input=hidden, size=10)
 loss = fluid.layers.cross_entropy(input=fc, label=label)
-loss = fluid.layers.mean(loss)
+loss = paddle.mean(loss)
 return loss
@@ -83,7 +83,7 @@ def conv_net(img, label):
 act="relu")
 prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 return avg_loss
......
@@ -87,7 +87,7 @@ def bow_net(data,
 fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
 prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
 cost = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 return avg_cost
......
@@ -103,7 +103,7 @@ def _dygraph_to_static_func_(dygraph_func):
 @dygraph_to_static_func
 def func(x):
-if fluid.layers.mean(x) < 0:
+if paddle.mean(x) < 0:
 x_v = x - 1
 else:
 x_v = x + 1
......
@@ -897,7 +897,7 @@ class ReduceLROnPlateau(LearningRateDecay):
 check_type(loss, 'loss', Variable, 'ReduceLROnPlateau.step')
 assert len(loss.shape) == 1 and loss.shape[0] == 1, "the loss.shape " \
 "should be (1L,), but the current loss.shape is {}. Maybe that " \
-"you should call fluid.layers.mean to process it first.".format(loss.shape)
+"you should call paddle.mean to process it first.".format(loss.shape)
 self.epoch_num += 1
 if self.cooldown_counter > 0:
......
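The hunk above only rewords an error message, but the constraint it describes matters for the whole PR: `ReduceLROnPlateau.step` (and `Optimizer.minimize` further down) expect a loss that has already been reduced to a single element, which is exactly what `paddle.mean` produces. A hedged sketch of what the updated message asks the user to do:

```python
import paddle

# Per-sample losses with shape [8] would trip the assert above.
per_sample_loss = paddle.rand([8])

# Reducing with paddle.mean yields a single-element tensor
# (shape [1] on the Paddle versions this code targets), which
# passes the shape check before scheduler.step(loss) is called.
loss = paddle.mean(per_sample_loss)
```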
@@ -16,6 +16,7 @@ import argparse
 import logging
 import time
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
@@ -123,7 +124,7 @@ def model():
 auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
 label=label)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 return datas, avg_cost, predict, train_file_path
......
@@ -102,7 +102,7 @@ def run_check():
 exe = executor.Executor(
 core.CUDAPlace(0) if core.is_compiled_with_cuda() and
 (core.get_cuda_device_count() > 0) else core.CPUPlace())
-loss = layers.mean(out)
+loss = paddle.mean(out)
 loss.persistable = True
 optimizer.SGD(learning_rate=0.01).minimize(loss)
 startup_prog.random_seed = 1
......
@@ -493,7 +493,7 @@ def save_params(executor, dirname, main_program=None, filename=None):
 predict = fluid.layers.fc(input=image, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=predict, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 exe = fluid.Executor(fluid.CPUPlace())
 exe.run(fluid.default_startup_program())
@@ -719,7 +719,7 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
 predict = fluid.layers.fc(input=image, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=predict, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 exe = fluid.Executor(fluid.CPUPlace())
 exe.run(fluid.default_startup_program())
 fluid.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
@@ -1315,7 +1315,7 @@ def save_inference_model(dirname,
 predict = fluid.layers.fc(input=image, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=predict, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 exe = fluid.Executor(fluid.CPUPlace())
 exe.run(fluid.default_startup_program())
......
@@ -13089,7 +13089,7 @@ def mean(x, name=None):
 input = fluid.layers.data(
 name='data', shape=[2, 3], dtype='float32')
-mean = fluid.layers.mean(input)
+mean = paddle.mean(input)
 """
 if _in_legacy_dygraph():
......
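This hunk updates the usage example embedded in the docstring of the legacy `fluid.layers.mean` itself, so even the old API now points users at `paddle.mean`. A hedged static-graph sketch of the updated example, assuming the Paddle 2.x static interfaces:

```python
import paddle

paddle.enable_static()

# Static-graph equivalent of the docstring snippet above.
data = paddle.static.data(name='data', shape=[2, 3], dtype='float32')
mean = paddle.mean(data)
```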
@@ -913,7 +913,7 @@ class Optimizer(object):
 program = loss.block.program
 assert len(loss.shape) == 1 and loss.shape[0] == 1, \
 "The loss.shape should be (1L,), but the current loss.shape is {}. " \
-"Maybe that you should call fluid.layers.mean to process the current loss.".format(
+"Maybe that you should call paddle.mean to process the current loss.".format(
 loss.shape)
 parameter_list = parameter_list if parameter_list \
 else self._parameter_list
@@ -6834,7 +6834,7 @@ class LookaheadOptimizer(object):
 label = fluid.layers.data(name="label", shape=[1], dtype="int64")
 y = fluid.layers.fc(input=[x], size=2, act="softmax")
 loss = fluid.layers.cross_entropy(input=y, label=label)
-loss = fluid.layers.mean(x=loss)
+loss = paddle.mean(x=loss)
 sgd = fluid.optimizer.SGD(learning_rate=0.01)
 optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
 alpha=0.5,
......
@@ -48,7 +48,7 @@ def convolution_net(data,
 size=class_dim,
 act="softmax")
 cost = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 accuracy = fluid.layers.accuracy(input=prediction, label=label)
 return avg_cost, accuracy, prediction
@@ -93,7 +93,7 @@ def dyn_rnn_lstm(data,
 last = fluid.layers.sequence_last_step(rnn())
 prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
 cost = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 accuracy = fluid.layers.accuracy(input=prediction, label=label)
 return avg_cost, accuracy, prediction
@@ -132,7 +132,7 @@ def stacked_lstm_net(data,
 size=class_dim,
 act='softmax')
 cost = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 accuracy = fluid.layers.accuracy(input=prediction, label=label)
 return avg_cost, accuracy, prediction
......
@@ -56,16 +56,16 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16):
 with amp.bf16.bf16_guard():
 y_predict = fluid.layers.fc(input=x, size=1, act=None)
 cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 else:
 y_predict = fluid.layers.fc(input=x, size=1, act=None)
 with amp.bf16.bf16_guard():
 cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 else:
 y_predict = fluid.layers.fc(input=x, size=1, act=None)
 cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 lr = 5e-3 if use_bf16 else 1e-3
 sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr)
......
@@ -126,7 +126,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
 predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 acc = fluid.layers.accuracy(input=predict, label=label)
 # Test program
......
@@ -160,7 +160,7 @@ def train(use_cuda, save_dirname=None, is_local=True):
 param_attr=fluid.ParamAttr(
 name='crfw',
 learning_rate=mix_hidden_lr))
-avg_cost = fluid.layers.mean(crf_cost)
+avg_cost = paddle.mean(crf_cost)
 # TODO(qiao)
 # check other optimizers and check why out will be NAN
......
@@ -34,7 +34,7 @@ BATCH_SIZE = 64
 def loss_net(hidden, label):
 prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
 loss = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 acc = fluid.layers.accuracy(input=prediction, label=label)
 return prediction, avg_loss, acc
......
@@ -153,7 +153,7 @@ def model():
 label = layers.data(name='score', shape=[1], dtype='float32')
 square_cost = layers.square_error_cost(input=scale_infer, label=label)
-avg_cost = layers.mean(square_cost)
+avg_cost = paddle.mean(square_cost)
 return scale_infer, avg_cost
......
@@ -158,7 +158,7 @@ def seq_to_seq_net():
 dtype='int64',
 lod_level=1)
 cost = fluid.layers.cross_entropy(input=prediction, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 return avg_cost, prediction
......
@@ -85,7 +85,7 @@ def train(target,
 size=dict_size,
 act='softmax')
 cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 return avg_cost, predict_word
 word_dict = paddle.dataset.imikolov.build_dict()
......
@@ -145,7 +145,7 @@ def train_main(use_cuda):
 dtype='int64',
 lod_level=1)
 cost = layers.cross_entropy(input=rnn_out, label=label)
-avg_cost = layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 optimizer = fluid.optimizer.Adagrad(learning_rate=1e-3)
 optimizer.minimize(avg_cost)
......
@@ -35,7 +35,7 @@ with fluid.program_guard(main_program=prog):
 label = fluid.layers.data(name='y', shape=[1], dtype='int64')
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 prog_clip = prog.clone()
 prog_clip.block(0).var(hidden1.name)._set_error_clip(
......
@@ -66,7 +66,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
 mask=cond,
 x=image)
 loss = layers.cross_entropy(input=prob, label=label)
-avg_loss = layers.mean(loss)
+avg_loss = paddle.mean(loss)
 optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
 optimizer.minimize(avg_loss, startup_prog)
@@ -124,7 +124,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
 prob = ie()
 loss = layers.cross_entropy(input=prob[0], label=label)
-avg_loss = layers.mean(loss)
+avg_loss = paddle.mean(loss)
 optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
 optimizer.minimize(avg_loss, startup_prog)
......
@@ -61,7 +61,7 @@ class TestASPHelperPruningBase(unittest.TestCase):
 def run_training_pruning_test(self, get_mask_gen_func, get_mask_check_func):
 with fluid.program_guard(self.main_program, self.startup_program):
-loss = fluid.layers.mean(
+loss = paddle.mean(
 fluid.layers.cross_entropy(input=self.predict,
 label=self.label))
 optimizer = paddle.incubate.asp.decorate(
......
@@ -242,7 +242,7 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase):
 def test_training_pruning(self):
 with fluid.program_guard(self.main_program, self.startup_program):
-loss = fluid.layers.mean(
+loss = paddle.mean(
 fluid.layers.cross_entropy(input=self.predict,
 label=self.label))
 optimizer = sparsity.decorate(
......
@@ -48,7 +48,7 @@ class TestASPStaticOptimize(unittest.TestCase):
 with fluid.program_guard(self.main_program, self.startup_program):
 self.img, self.label, predict = build_model()
-self.loss = fluid.layers.mean(
+self.loss = paddle.mean(
 fluid.layers.cross_entropy(input=predict, label=self.label))
 self.optimizer = fluid.optimizer.SGD(learning_rate=0.01)
......
@@ -66,7 +66,7 @@ class TestASPStaticPruningBase(unittest.TestCase):
 def test_training_pruning(self):
 with fluid.program_guard(self.main_program, self.startup_program):
-loss = fluid.layers.mean(
+loss = paddle.mean(
 fluid.layers.cross_entropy(input=self.predict,
 label=self.label))
 optimizer = paddle.incubate.asp.decorate(
......
@@ -141,7 +141,7 @@ class TestASPStaticOptimize(unittest.TestCase):
 with fluid.program_guard(self.main_program, self.startup_program):
 self.img, self.label, predict = build_model()
-self.loss = fluid.layers.mean(
+self.loss = paddle.mean(
 fluid.layers.cross_entropy(input=predict, label=self.label))
 self.optimizer = fluid.optimizer.SGD(learning_rate=0.01)
 self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
......
@@ -96,7 +96,7 @@ def mlp_pretrain_forward(train_program, start_program):
 predict = mlp(input)
 cost = layers.cross_entropy(input=predict, label=label)
-avg_cost = layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 return avg_cost, train_program, start_program
......
@@ -70,7 +70,7 @@ def net():
 cost, y_predict = fluid.layers.softmax_with_cross_entropy(
 hidden, y, return_softmax=True)
 acc_top1 = fluid.layers.accuracy(input=y_predict, label=y, k=1)
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.05)
 sgd_optimizer.minimize(avg_cost)
......
@@ -84,7 +84,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -92,7 +92,7 @@ class TestDistCTR2x2(TestDistRunnerBase):
 auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
 label=label)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 inference_program = paddle.fluid.default_main_program().clone()
......
@@ -143,7 +143,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
 label=label)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 self.feeds = datas
 self.train_file_path = ["fake1", "fake2"]
......
@@ -116,7 +116,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
 predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 fluid.layers.Print(avg_cost, message="avg_cost")
 self.feeds = datas
......
@@ -76,7 +76,7 @@ class TestFleetMetaOptimizerPrecision(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -76,7 +76,7 @@ class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -91,7 +91,7 @@ def get_loss(cos_q_pt, cos_q_nt):
 shape=[-1, 1],
 value=0.0,
 dtype='float32'), loss_op2)
-avg_cost = fluid.layers.mean(loss_op3)
+avg_cost = paddle.mean(loss_op3)
 return avg_cost
......
@@ -133,7 +133,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
 acc = fluid.layers.accuracy(input=predict, label=label)
 auc_var, _, _ = fluid.layers.auc(input=predict, label=label)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 self.feeds = datas
 self.train_file_path = ["fake1", "fake2"]
......
@@ -85,7 +85,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -58,7 +58,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -38,7 +38,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -37,7 +37,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -49,7 +49,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 # Train program
 predict = cnn_model(images)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 # Evaluator
 batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
......
@@ -229,7 +229,7 @@ class DistSeResneXt2x2(TestDistRunnerBase):
 out = model.net(input=image, class_dim=102)
 cost = fluid.layers.cross_entropy(input=out, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
 acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
......
@@ -56,7 +56,7 @@ def runtime_main():
 act='softmax')
 cost = paddle.fluid.layers.cross_entropy(input=prediction,
 label=input_y)
-avg_cost = paddle.fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.sharding = True
......
@@ -137,7 +137,7 @@ class TestDistTextClassification2x2(TestDistRunnerBase):
 # Train program
 predict = conv_net(data, dict_dim)
 cost = fluid.layers.cross_entropy(input=predict, label=label)
-avg_cost = fluid.layers.mean(x=cost)
+avg_cost = paddle.mean(x=cost)
 acc = fluid.layers.accuracy(input=predict, label=label)
 inference_program = fluid.default_main_program().clone()
......
@@ -94,7 +94,7 @@ class TestDistWord2vec2x2(TestDistRunnerBase):
 initializer=fluid.initializer.Constant(value=0.1)))
 cost = fluid.layers.cross_entropy(input=predict_word,
 label=words[4])
-avg_cost = fluid.layers.mean(cost)
+avg_cost = paddle.mean(cost)
 return avg_cost, predict_word
 word_dict = paddle.dataset.imikolov.build_dict()
......
@@ -13,7 +13,7 @@
 # limitations under the License.
 from __future__ import absolute_import, division, print_function
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import Embedding, Layer, Linear
 from paddle.fluid.dygraph.jit import declarative
@@ -357,7 +357,7 @@ class PretrainModelLayer(Layer):
 mask_lm_loss = fluid.layers.softmax_with_cross_entropy(logits=fc_out,
 label=mask_label)
-mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
+mean_mask_lm_loss = paddle.mean(mask_lm_loss)
 next_sent_fc_out = self.next_sent_fc(next_sent_feat)
@@ -367,7 +367,7 @@ class PretrainModelLayer(Layer):
 next_sent_acc = fluid.layers.accuracy(input=next_sent_softmax,
 label=labels)
-mean_next_sent_loss = fluid.layers.mean(next_sent_loss)
+mean_next_sent_loss = paddle.mean(next_sent_loss)
 loss = mean_next_sent_loss + mean_mask_lm_loss
 return next_sent_acc, mean_mask_lm_loss, loss
@@ -41,7 +41,7 @@ def dyfunc_empty_nonlocal(x):
 def dyfunc_with_if_else(x_v, label=None):
-if fluid.layers.mean(x_v).numpy()[0] > 5:
+if paddle.mean(x_v).numpy()[0] > 5:
 x_v = x_v - 1
 else:
 x_v = x_v + 1
@@ -87,14 +87,14 @@ def dyfunc_with_if_else3(x):
 m = x + 2
 n = x + 3
 return q, x, y, z
-q, x, y, z = fluid.layers.cond(fluid.layers.mean(x)[0] < 5, lambda :
+q, x, y, z = fluid.layers.cond(paddle.mean(x)[0] < 5, lambda :
 paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y),
 lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q,
 x, y))
 """
 y = x + 1
 # NOTE: x_v[0] < 5 is True
-if fluid.layers.mean(x).numpy()[0] < 5:
+if paddle.mean(x).numpy()[0] < 5:
 x = x + 1
 z = x + 2
 q = x + 3
@@ -155,13 +155,13 @@ def nested_if_else(x_v):
 batch_size = fluid.layers.shape(x_v)[0]
 # if tensor.shape is [1], now support to compare with numpy.
-if fluid.layers.mean(x_v).numpy() < 0:
+if paddle.mean(x_v).numpy() < 0:
 y = x_v + bias
 w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
 if y.numpy()[0] < 10:
 tmp = y * w
 y = fluid.layers.relu(tmp)
-if fluid.layers.mean(y).numpy()[0] < batch_size:
+if paddle.mean(y).numpy()[0] < batch_size:
 y = fluid.layers.abs(y)
 else:
 tmp = fluid.layers.fill_constant([feat_size],
@@ -257,7 +257,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
 value=1)
 # Control flow `if` statement
 fc_out = self.fc(input)
-if fluid.layers.mean(fc_out).numpy()[0] < 0:
+if paddle.mean(fc_out).numpy()[0] < 0:
 y = fc_out + self.constant_vars['bias']
 self.constant_vars['w'] = fluid.layers.fill_constant(
 [5], dtype='float32', value=10)
@@ -280,13 +280,13 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
 else:
 y = fc_out - self.constant_vars['bias']
-loss = fluid.layers.mean(y)
+loss = paddle.mean(y)
 return loss
 def if_with_and_or(x_v, label=None):
 batch_size = fluid.layers.shape(x_v)
-if x_v is not None and (fluid.layers.mean(x_v).numpy()[0] > 0 or label
+if x_v is not None and (paddle.mean(x_v).numpy()[0] > 0 or label
 is not None) and batch_size[0] > 1 and True:
 x_v = x_v - 1
 else:
@@ -318,7 +318,7 @@ def if_with_and_or_2(x, y=None):
 def if_with_and_or_3(x, y=None):
 batch_size = fluid.layers.shape(x)
-mean_res = fluid.layers.mean(x)
+mean_res = paddle.mean(x)
 if x is not None and batch_size[0] > 1 and y is not None and mean_res.numpy(
 )[0] > 0:
 x = x + 1
@@ -329,7 +329,7 @@ def if_with_and_or_3(x, y=None):
 def if_with_and_or_4(x, y=None):
 batch_size = fluid.layers.shape(x)
-mean_res = fluid.layers.mean(x)
+mean_res = paddle.mean(x)
 if (x is not None and batch_size[0] > 1) or (y is not None
 and mean_res.numpy()[0] > 0):
 x = x + 1
@@ -349,7 +349,7 @@ def if_with_class_var(x, y=None):
 foo = Foo()
 batch_size = fluid.layers.shape(x)
-mean_res = fluid.layers.mean(x)
+mean_res = paddle.mean(x)
 if batch_size[0] > foo.a:
 x = x + foo.b
@@ -361,7 +361,7 @@ def if_with_class_var(x, y=None):
 def if_tensor_case(x):
 x = fluid.dygraph.to_variable(x)
-mean = fluid.layers.mean(x)
+mean = paddle.mean(x)
 # It is equivalent to `if mean != 0`
 if mean:
 for i in range(0, 10):
@@ -376,7 +376,7 @@ def if_tensor_case(x):
 x += i
 # join `and`/`or`
-if fluid.layers.mean(x) + 1 and mean > 1 and x is not None or 2 > 1:
+if paddle.mean(x) + 1 and mean > 1 and x is not None or 2 > 1:
 x -= 1
 # `not` statement
......
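Most hunks in this file swap the call inside Python `if` conditions that the dygraph-to-static translator later rewrites. Nothing about the control flow changes: in dygraph mode `paddle.mean` executes eagerly, so its value can be read back and branched on, just as `fluid.layers.mean` allowed. A minimal sketch, assuming eager execution:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.array([1.0, 2.0, 3.0], dtype='float32'))

# paddle.mean runs eagerly here, so .numpy() exposes its value to
# ordinary Python control flow, mirroring the rewritten conditions above.
if paddle.mean(x).numpy() > 1.5:
    x = x - 1
else:
    x = x + 1
```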
@@ -19,6 +19,7 @@ import textwrap
 from paddle.utils import gast
 import inspect
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func
@@ -59,7 +60,7 @@ class TestAST2Func(unittest.TestCase):
 def func(x):
 y = fluid.layers.relu(x)
-loss = fluid.layers.mean(y)
+loss = paddle.mean(y)
 return loss
 x_data = np.random.random([10, 16]).astype('float32')
......
@@ -590,7 +590,7 @@ def val_bmn(model, args):
 loss, tem_loss, pem_reg_loss, pem_cls_loss = bmn_loss_func(
 pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, args)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 loss_data += [
 avg_loss.numpy()[0],
@@ -665,7 +665,7 @@ class TestTrain(unittest.TestCase):
 loss, tem_loss, pem_reg_loss, pem_cls_loss = bmn_loss_func(
 pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
 gt_end, args)
-avg_loss = fluid.layers.mean(loss)
+avg_loss = paddle.mean(loss)
 avg_loss.backward()
 adam.minimize(avg_loss)
......
@@ -17,7 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from collections import Counter
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.jit import declarative
@@ -113,7 +113,7 @@ class TestCacheProgramWithOptimizer(unittest.TestCase):
 def simple_func(x):
 inputs = fluid.dygraph.to_variable(x)
-mean = fluid.layers.mean(inputs)
+mean = paddle.mean(inputs)
 return mean
......
@@ -37,7 +37,7 @@ np.random.seed(SEED)
 # Use a decorator to test exception
 @paddle.jit.to_static
 def dyfunc_with_if(x_v):
-if fluid.layers.mean(x_v).numpy()[0] > 5:
+if paddle.mean(x_v).numpy()[0] > 5:
 x_v = x_v - 1
 else:
 x_v = x_v + 1
@@ -58,7 +58,7 @@ def nested_func(x_v):
 @paddle.jit.to_static
 def dyfunc_with_third_library_logging(x_v):
 logging.info('test dyfunc_with_third_library_logging')
-if fluid.layers.mean(x_v).numpy()[0] > 5:
+if paddle.mean(x_v).numpy()[0] > 5:
 x_v = x_v - 1
 else:
 x_v = x_v + 1
......
@@ -33,7 +33,7 @@ def inner_func():
 def func_error_in_compile_time(x):
 x = fluid.dygraph.to_variable(x)
 inner_func()
-if fluid.layers.mean(x) < 0:
+if paddle.mean(x) < 0:
 x_v = x - 1
 else:
 x_v = x + 1
@@ -78,7 +78,7 @@ class LayerErrorInCompiletime(fluid.dygraph.Layer):
 def forward(self, x):
 y = self._linear(x)
 z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")
-out = fluid.layers.mean(y[z])
+out = paddle.mean(y[z])
 return out
@@ -386,7 +386,7 @@ class TestJitSaveInCompiletime(TestErrorBase):
 'y = self._linear(x)',
 'z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")',
 '<--- HERE',
-'out = fluid.layers.mean(y[z])',
+'out = paddle.mean(y[z])',
 'return out'
 ]
......
@@ -16,7 +16,7 @@ from __future__ import print_function
 import numpy as np
 import unittest
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
@@ -59,7 +59,7 @@ class Linear(fluid.dygraph.Layer):
 @declarative
 def forward(self, x):
 pre = self.fc(x)
-loss = fluid.layers.mean(pre)
+loss = paddle.mean(pre)
 return pre, loss
......
@@ -15,6 +15,7 @@
 from __future__ import print_function
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 import unittest
 from paddle.fluid.dygraph import declarative
@@ -23,7 +24,7 @@ from paddle.fluid.dygraph import declarative
 @fluid.dygraph.declarative
 def dygraph_decorated_func(x):
 x = fluid.dygraph.to_variable(x)
-if fluid.layers.mean(x) > 0:
+if paddle.mean(x) > 0:
 x_v = x - 1
 else:
 x_v = x + 1
@@ -33,7 +34,7 @@ def dygraph_decorated_func(x):
 @fluid.dygraph.declarative
 def jit_decorated_func(x):
 x = fluid.dygraph.to_variable(x)
-if fluid.layers.mean(x) > 0:
+if paddle.mean(x) > 0:
 x_v = x - 1
 else:
 x_v = x + 1
......
@@ -251,7 +251,7 @@ def relu(x):
 def call_external_func(x, label=None):
-if fluid.layers.mean(x) < 0:
+if paddle.mean(x) < 0:
 x_v = x - 1
 else:
 x_v = add_fn(x)
@@ -274,7 +274,7 @@ class NetWithExternalFunc(fluid.dygraph.Layer):
 @declarative
 def forward(self, x, label=None):
-if fluid.layers.mean(x) < 0:
+if paddle.mean(x) < 0:
 x_v = x - 1
 else:
 x_v = add_fn(x)
......
@@ -403,7 +403,7 @@ class LexNet(fluid.dygraph.Layer):
 crf_cost = self.linear_chain_crf(input=emission,
 label=target,
 length=length)
-avg_cost = fluid.layers.mean(x=crf_cost)
+avg_cost = paddle.mean(x=crf_cost)
 crf_decode = self.crf_decoding(input=emission, length=length)
 return avg_cost, crf_decode
......
@@ -16,6 +16,7 @@ from __future__ import print_function
 import numpy as np
 import unittest
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import declarative
@@ -25,7 +26,7 @@ def call_lambda_as_func(x):
 x = fluid.dygraph.to_variable(x)
 add_func = lambda x, y: x + y
-mean_func = lambda x: fluid.layers.mean(x)
+mean_func = lambda x: paddle.mean(x)
 y = add_func(x, 1)
 y = add_func(y, add_func(y, -1))
@@ -38,7 +39,7 @@ def call_lambda_directly(x):
 x = fluid.dygraph.to_variable(x)
 y = (lambda x, y: x + y)(x, x)
-out = (lambda x: fluid.layers.mean(x))(y)
+out = (lambda x: paddle.mean(x))(y)
 return out
@@ -48,7 +49,7 @@ def call_lambda_in_func(x):
 add_func = lambda x: x + 1
-y = fluid.layers.mean((lambda x: fluid.layers.relu(x))(x))
+y = paddle.mean((lambda x: fluid.layers.relu(x))(x))
 out = add_func(y) if y > 1 and y < 2 else (lambda x: x**2)(y)
 return out
@@ -59,7 +60,7 @@ def call_lambda_with_ifExpr(x):
 add_func = lambda x: x + 1
-y = fluid.layers.mean(x)
+y = paddle.mean(x)
 out = add_func(y) if y or y < 2 else (lambda x: x**2)(y)
 return out
@@ -70,7 +71,7 @@ def call_lambda_with_ifExpr2(x):
 add_func = lambda x: x + 1
-y = fluid.layers.mean(x)
+y = paddle.mean(x)
 # NOTE: y is Variable, but z<2 is python bool value
 z = 0
......
@@ -119,7 +119,7 @@ class MNIST(fluid.dygraph.Layer):
        if label is not None:
            acc = fluid.layers.accuracy(input=x, label=label)
            loss = fluid.layers.cross_entropy(x, label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return x, acc, avg_loss
        else:
...
@@ -479,7 +479,7 @@ def train_mobilenet(args, to_static):
                softmax_out = fluid.layers.softmax(out, use_cudnn=False)
                loss = fluid.layers.cross_entropy(input=softmax_out,
                                                  label=label)
-                avg_loss = fluid.layers.mean(x=loss)
+                avg_loss = paddle.mean(x=loss)
                acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
                acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
                t_start_back = time.time()
...
@@ -33,7 +33,7 @@ def nested_input(x, y):
    sub_res = z_elem[0] - z_elem[1]
    mul_res = y[-1]['d']['da'] * y[-1]['d']['dc']
-    mean_func = fluid.layers.mean
+    mean_func = paddle.mean
    out = mean_func(sub_res) + mean_func(sum_res) + mean_func(mul_res)
    return out
...
@@ -42,7 +42,7 @@ def simple_func(x, weight_numpy):
    x = fluid.dygraph.to_variable(x)
    w = fluid.dygraph.to_variable(weight_numpy)
    y = fluid.layers.matmul(x, w)
-    z = fluid.layers.mean(y)
+    z = paddle.mean(y)
    return z
@@ -51,7 +51,7 @@ def decorated_simple_func(x, weight_numpy):
    x = fluid.dygraph.to_variable(x)
    w = fluid.dygraph.to_variable(weight_numpy)
    y = fluid.layers.matmul(x, w)
-    z = fluid.layers.mean(y)
+    z = paddle.mean(y)
    return z
@@ -91,7 +91,7 @@ class StaticCode1():
            return x_v
        _jst.IfElse(
-            fluid.layers.mean(x_v)[0] > 5, true_fn_0, false_fn_0, get_args_0,
+            paddle.mean(x_v)[0] > 5, true_fn_0, false_fn_0, get_args_0,
            set_args_0, ('x_v', ))
        def get_args_1():
@@ -148,7 +148,7 @@ class StaticCode2():
            return x_v
        _jst.IfElse(
-            fluid.layers.mean(x_v)[0] > 5, true_fn_2, false_fn_2, get_args_2,
+            paddle.mean(x_v)[0] > 5, true_fn_2, false_fn_2, get_args_2,
            set_args_2, ('x_v', ))
        def get_args_3():
...
@@ -262,7 +262,7 @@ class ResNetHelper:
                    pred = resnet(img)
                    loss = fluid.layers.cross_entropy(input=pred, label=label)
-                    avg_loss = fluid.layers.mean(x=loss)
+                    avg_loss = paddle.mean(x=loss)
                    acc_top1 = fluid.layers.accuracy(input=pred,
                                                     label=label,
                                                     k=1)
...
@@ -75,7 +75,7 @@ def train(to_static, build_strategy=None):
            # precision problem, need to figure out the underlying reason.
            # If we remove it, the loss between dygraph and dy2stat is exactly same.
            loss = fluid.layers.cross_entropy(input=pred, label=label)
-            avg_loss = fluid.layers.mean(x=pred)
+            avg_loss = paddle.mean(x=pred)
            acc_top1 = fluid.layers.accuracy(input=pred, label=label, k=1)
            acc_top5 = fluid.layers.accuracy(input=pred, label=label, k=5)
...
@@ -77,7 +77,7 @@ def train(to_static, build_strategy=None):
                               level='O2'):
                pred = resnet(img)
                loss = fluid.layers.cross_entropy(input=pred, label=label)
-                avg_loss = fluid.layers.mean(x=pred)
+                avg_loss = paddle.mean(x=pred)
                acc_top1 = fluid.layers.accuracy(input=pred, label=label, k=1)
                acc_top5 = fluid.layers.accuracy(input=pred, label=label, k=5)
...
@@ -45,7 +45,7 @@ class SimpleFcLayer(fluid.dygraph.Layer):
    def forward(self, x):
        y = self._linear(x)
        z = self._linear(y)
-        out = fluid.layers.mean(z)
+        out = paddle.mean(z)
        return out, y
...
@@ -318,7 +318,7 @@ class SeResNeXt(fluid.dygraph.Layer):
        softmax_out = fluid.layers.softmax(out, use_cudnn=False)
        loss = fluid.layers.cross_entropy(input=softmax_out, label=label)
-        avg_loss = fluid.layers.mean(x=loss)
+        avg_loss = paddle.mean(x=loss)
        acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
        acc_top5 = fluid.layers.accuracy(input=softmax_out, label=label, k=5)
...
@@ -97,7 +97,7 @@ class CNN(fluid.dygraph.Layer):
        prediction = self._fc_prediction(fc_1)
        cost = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        acc = fluid.layers.accuracy(input=prediction, label=label)
        return avg_cost, prediction, acc
@@ -141,7 +141,7 @@ class BOW(fluid.dygraph.Layer):
        prediction = self._fc_prediction(fc_2)
        cost = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        acc = fluid.layers.accuracy(input=prediction, label=label)
        return avg_cost, prediction, acc
@@ -189,7 +189,7 @@ class GRU(fluid.dygraph.Layer):
        prediction = self._fc_prediction(fc_2)
        cost = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        acc = fluid.layers.accuracy(input=prediction, label=label)
        return avg_cost, prediction, acc
@@ -247,7 +247,7 @@ class BiGRU(fluid.dygraph.Layer):
        # TODO(Aurelius84): Uncomment the following codes when we support return variable-length vars.
        # if label is not None:
        cost = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        acc = fluid.layers.accuracy(input=prediction, label=label)
        return avg_cost, prediction, acc
        # else:
...
@@ -303,7 +303,7 @@ def train(args, fake_data_reader, to_static):
                loss = fluid.layers.cross_entropy(input=outputs,
                                                  label=labels,
                                                  ignore_index=-1)
-                avg_loss = fluid.layers.mean(loss)
+                avg_loss = paddle.mean(loss)
                acc_top1 = fluid.layers.accuracy(input=outputs,
                                                 label=labels,
                                                 k=1)
...
@@ -101,7 +101,7 @@ def net(batch_size=4, lr=0.01):
    predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
-    avg_cost = fluid.layers.mean(x=cost)
+    avg_cost = paddle.mean(x=cost)
    return datas, avg_cost
...
@@ -66,7 +66,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
                                          act='softmax')
        cost = paddle.fluid.layers.cross_entropy(input=prediction,
                                                 label=input_y)
-        avg_cost = paddle.fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        strategy = paddle.distributed.fleet.DistributedStrategy()
        return avg_cost, strategy
@@ -101,7 +101,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
                                          act='softmax')
        cost = paddle.fluid.layers.cross_entropy(input=prediction,
                                                 label=input_y)
-        avg_cost = paddle.fluid.layers.mean(x=cost)
+        avg_cost = paddle.mean(x=cost)
        strategy = paddle.distributed.fleet.DistributedStrategy()
        return avg_cost, strategy
...
@@ -48,7 +48,7 @@ class TestBase(IPUOpTest):
        x = paddle.static.data(name=self.feed_list[0],
                               shape=self.feed_shape[0],
                               dtype='float32')
-        out = paddle.fluid.layers.mean(x)
+        out = paddle.mean(x)
        self.fetch_list = [out.name]
    def run_model(self, exec_mode):
...
@@ -18,6 +18,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
from quant_dequant_test import QuantDequantTest
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
@@ -54,7 +55,7 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
            cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816])
            result = fluid.layers.relu(cout)
            loss = fluid.layers.cross_entropy(input=result, label=label_shape)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -152,7 +153,7 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
            cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816])
            result = fluid.layers.relu(cout)
            loss = fluid.layers.cross_entropy(input=result, label=label_shape)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -245,7 +246,7 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
            cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816])
            result = fluid.layers.relu(cout)
            loss = fluid.layers.cross_entropy(input=result, label=label_shape)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
...
@@ -18,6 +18,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
from quant_dequant_test import QuantDequantTest
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
@@ -40,7 +41,7 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest):
                                     act="relu")
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -105,7 +106,7 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest):
            c_out = fluid.layers.reshape(fc_out, shape=[0, 784])
            result = fluid.layers.relu(c_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -172,7 +173,7 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest):
            c_out = fluid.layers.reshape(fc_out, shape=[1, 1, 2744])
            result = fluid.layers.relu(c_out)
            loss = fluid.layers.cross_entropy(input=result, label=label_shape)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
...
@@ -16,6 +16,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
from quant_dequant_test import QuantDequantTest
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
@@ -44,7 +45,7 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
                                     act=None)
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -136,7 +137,7 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
                                     act=None)
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
@@ -227,7 +228,7 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest):
                                     act=None)
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
-            avg_loss = fluid.layers.mean(loss)
+            avg_loss = paddle.mean(loss)
            return avg_loss, result
        self.main_program.random_seed = 2
...
@@ -20,7 +20,7 @@ import random
import unittest
import warnings
import numpy as np
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import Program, Block
@@ -56,7 +56,7 @@ class PassTest(unittest.TestCase):
    def append_gradients(self, outs):
        with fluid.program_guard(self.main_program, self.startup_program):
-            loss = fluid.layers.mean(outs)
+            loss = paddle.mean(outs)
            fluid.backward.append_backward(loss)
    def check_output(self, startup_on_cpu=False, atol=1e-5):
...
@@ -41,7 +41,7 @@ class TestQuantizationSubGraph(unittest.TestCase):
            for _ in six.moves.xrange(num):
                hidden = fluid.layers.fc(hidden, size=128, act='relu')
            loss = fluid.layers.cross_entropy(input=hidden, label=label)
-            loss = fluid.layers.mean(loss)
+            loss = paddle.mean(loss)
            return loss
        main_program = Program()
...
@@ -148,7 +148,7 @@ class TestMomentumV2(unittest.TestCase):
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = fluid.layers.mean(cost)
+            avg_cost = paddle.mean(cost)
            rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1,
                                                      momentum=0.9)
@@ -271,7 +271,7 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase):
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = fluid.layers.mean(cost)
+            avg_cost = paddle.mean(cost)
            momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
                learning_rate=0.1, momentum=0.9)
@@ -591,7 +591,7 @@ class TestMultiTensorMomentumStatic(unittest.TestCase):
                                      name='X',
                                      dtype='float32')
            hidden = paddle.static.nn.fc(x=data, size=10)
-            loss = paddle.fluid.layers.mean(hidden)
+            loss = paddle.mean(hidden)
            optimizer.minimize(loss)
        exe.run(startup_program)
        if use_amp:
...
@@ -107,7 +107,7 @@ class TestWhereAPI(unittest.TestCase):
            x.stop_gradient = x_stop_gradient
            y.stop_gradient = y_stop_gradient
            result = paddle.where(cond, x, y)
-            append_backward(layers.mean(result))
+            append_backward(paddle.mean(result))
            for use_mlu in [False, True]:
                place = (paddle.device.MLUPlace(0)
                         if use_mlu else fluid.CPUPlace())
...
@@ -117,7 +117,7 @@ class TestMomentumV2(unittest.TestCase):
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = fluid.layers.mean(cost)
+            avg_cost = paddle.mean(cost)
            rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1,
                                                      momentum=0.9)
@@ -243,7 +243,7 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase):
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = fluid.layers.mean(cost)
+            avg_cost = paddle.mean(cost)
            momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
                learning_rate=0.1, momentum=0.9)
...
@@ -86,7 +86,7 @@ class TestSoftmaxNet(unittest.TestCase):
            prob = fluid.layers.softmax(prediction, axis=1)
            cost = fluid.layers.cross_entropy(input=prob, label=label)
-            loss = fluid.layers.mean(cost)
+            loss = paddle.mean(cost)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)
...
@@ -105,7 +105,7 @@ class TestNPUWhereAPI(unittest.TestCase):
            y.stop_gradient = y_stop_gradient
            result = paddle.where(cond, x, y)
-            append_backward(fluid.layers.mean(result))
+            append_backward(paddle.mean(result))
            exe = fluid.Executor(self.place)
            exe.run(startup)
...
@@ -90,7 +90,7 @@ class TestWhileOp(unittest.TestCase):
            layers.array_write(result2, i=j, array=mem_array)
            layers.less_than(x=j, y=array_len2, cond=cond2)
        sum_result = layers.array_read(array=mem_array, i=j)
-        loss = layers.mean(sum_result)
+        loss = paddle.mean(sum_result)
        return loss, sum_result
    def test_simple_net(self):
...
@@ -112,7 +112,7 @@ class MNIST(fluid.dygraph.Layer):
        x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
        cost = self._fc(x)
        loss = fluid.layers.cross_entropy(cost, label)
-        avg_loss = fluid.layers.mean(loss)
+        avg_loss = paddle.mean(loss)
        return avg_loss
...
@@ -335,7 +335,7 @@ class TestSeResNeXt(TestParallelDyGraphRunnerBase):
        out = model(img)
        softmax_out = fluid.layers.softmax(out, use_cudnn=False)
        loss = fluid.layers.cross_entropy(input=softmax_out, label=label)
-        avg_loss = fluid.layers.mean(x=loss)
+        avg_loss = paddle.mean(x=loss)
        return avg_loss
...
@@ -94,7 +94,7 @@ class TestSyncBatchNorm(TestParallelDyGraphRunnerBase):
        out = model(img)
-        out = fluid.layers.mean(out)
+        out = paddle.mean(out)
        return out
...
@@ -104,7 +104,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
            predict = cnn_model(images)
        with fluid.device_guard("gpu:1"):
            cost = fluid.layers.cross_entropy(input=predict, label=label)
-            avg_cost = fluid.layers.mean(x=cost)
+            avg_cost = paddle.mean(x=cost)
        # Evaluator
        with fluid.device_guard("gpu:1"):
...
@@ -104,7 +104,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
            predict = cnn_model(images)
        with fluid.device_guard("gpu:1"):
            cost = fluid.layers.cross_entropy(input=predict, label=label)
-            avg_cost = fluid.layers.mean(x=cost)
+            avg_cost = paddle.mean(x=cost)
        # Evaluator
        with fluid.device_guard("gpu:1"):
...
@@ -98,7 +98,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
            predict = cnn_model(images)
        with fluid.device_guard("gpu:0"):
            cost = fluid.layers.cross_entropy(input=predict, label=label)
-            avg_cost = fluid.layers.mean(x=cost)
+            avg_cost = paddle.mean(x=cost)
        # Evaluator
        with fluid.device_guard("gpu:0"):
...
@@ -17,6 +17,7 @@ import paddle.fluid as fluid
fluid.core._set_eager_deletion_mode(-1, -1, False)
+import paddle
import paddle.fluid.layers.ops as ops
from paddle.fluid.layers.learning_rate_scheduler import cosine_decay
from simple_nets import init_data
@@ -172,7 +173,7 @@ def SE_ResNeXt50Small(use_feed):
    # Classifier layer:
    prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
-    loss = fluid.layers.mean(loss)
+    loss = paddle.mean(loss)
    return loss
...
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import paddle
import paddle.fluid as fluid
import numpy as np
@@ -27,7 +28,7 @@ def simple_fc_net_with_inputs(img, label, class_num=10):
                    value=1.0)))
    prediction = fluid.layers.fc(hidden, size=class_num, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
-    loss = fluid.layers.mean(loss)
+    loss = paddle.mean(loss)
    return loss
@@ -51,7 +52,7 @@ def batchnorm_fc_with_inputs(img, label, class_num=10):
    prediction = fluid.layers.fc(hidden, size=class_num, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
-    loss = fluid.layers.mean(loss)
+    loss = paddle.mean(loss)
    return loss
@@ -87,7 +88,7 @@ def bow_net(use_feed,
    fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
-    avg_cost = fluid.layers.mean(x=cost)
+    avg_cost = paddle.mean(x=cost)
    return avg_cost
...
@@ -136,7 +136,7 @@ class TestAdadeltaV2(unittest.TestCase):
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = fluid.layers.mean(cost)
+            avg_cost = paddle.mean(cost)
            rms_optimizer = paddle.optimizer.Adadelta(learning_rate=0.1)
            rms_optimizer.minimize(avg_cost)
...
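Review note: every hunk above is the same mechanical rewrite, mapping `fluid.layers.mean` (and `paddle.fluid.layers.mean`) to `paddle.mean` one-for-one, plus an `import paddle` where a file lacked it. A minimal sketch of the equivalence, assuming a Paddle 2.x install (the tensor values below are illustrative only, not taken from the diff):

```python
import numpy as np
import paddle

# paddle.mean reduces a tensor to its scalar mean, just like the retired
# fluid.layers.mean, so call sites translate directly in both dygraph and
# static-graph code.
x = paddle.to_tensor(np.arange(6, dtype='float32').reshape(2, 3))

avg = paddle.mean(x)       # positional form, replaces fluid.layers.mean(x)
avg_kw = paddle.mean(x=x)  # keyword form, replaces fluid.layers.mean(x=cost)

print(float(avg), float(avg_kw))  # both print 2.5
```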