Unverified · Commit efc094f5 authored by Yu Yang and committed by GitHub

Merge pull request #8543 from reyoung/feature/enhance_layer_generator

Enhance layer_function_generator
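
The generated layer functions (for example `fluid.layers.mean`) now accept their first input positionally in addition to the existing keyword form, which is what the call-site updates below reflect. A minimal before/after sketch, assuming the `paddle.v2.fluid` package layout of this era (the variable names are illustrative, not taken from the diff):

```python
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)

avg_cost = fluid.layers.mean(x=cost)   # keyword form, still supported
avg_cost = fluid.layers.mean(cost)     # positional form, enabled by this change
```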
......@@ -27,7 +27,6 @@ third_party/
cmake-build-*
# generated while compiling
python/paddle/v2/fluid/core.so
paddle/pybind/pybind.h
CMakeFiles
cmake_install.cmake
......
......@@ -130,7 +130,7 @@ def generate_layer_fn(op_type):
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
def infer_and_check_dtype(op_proto, **kwargs):
def infer_and_check_dtype(op_proto, *args, **kwargs):
"""
This function performs the sanity check for dtype and
instance type.
......@@ -141,6 +141,10 @@ def generate_layer_fn(op_type):
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0:
val = [args[0]]
args = args[1:]
for each in val:
if not isinstance(each, Variable):
raise ValueError("input of {0} must be variable".format(
......@@ -155,10 +159,10 @@ def generate_layer_fn(op_type):
return dtype
def func(**kwargs):
def func(*args, **kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_dtype(op_proto, **kwargs)
dtype = infer_and_check_dtype(op_proto, *args, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
......@@ -166,6 +170,9 @@ def generate_layer_fn(op_type):
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0 and len(args) != 0:
val = args[0]
args = args[1:]
inputs[ipt.name] = val
outputs = dict()
......
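
Taken together, the two functions above implement one rule: each declared input is filled from keyword arguments first, and only when no keyword value was given does the generated function consume the next positional argument. A standalone sketch of that dispatch logic (hypothetical helper name, simplified from the generated `func` shown above):

```python
def _collect_inputs(input_names, *args, **kwargs):
    # Hypothetical, simplified version of the argument handling added above:
    # keyword inputs win; remaining declared inputs are taken from *args in order.
    inputs = dict()
    for name in input_names:
        val = kwargs.pop(name, [])
        if not isinstance(val, (list, tuple)):
            val = [val]
        if len(val) == 0 and len(args) != 0:
            val = [args[0]]
            args = args[1:]
        inputs[name] = val
    return inputs

# Both call styles of a generated layer resolve to the same input dictionary:
assert _collect_inputs(['X'], 3.0) == {'X': [3.0]}
assert _collect_inputs(['X'], X=3.0) == {'X': [3.0]}
```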
......@@ -160,8 +160,8 @@ def sums(input, out=None):
a0 = layers.array_read(array=tmp, i=i)
i = layers.increment(x=i)
a1 = layers.array_read(array=tmp, i=i)
mean_a0 = layers.mean(x=a0)
mean_a1 = layers.mean(x=a1)
mean_a0 = layers.mean(a0)
mean_a1 = layers.mean(a1)
a_sum = layers.sums(input=[mean_a0, mean_a1])
"""
helper = LayerHelper('sum', **locals())
......
......@@ -147,7 +147,7 @@ def seq_to_seq_net():
label = fluid.layers.data(
name='label_sequence', shape=[1], dtype='int64', lod_level=1)
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
return avg_cost, prediction
......
......@@ -29,7 +29,7 @@ def train(use_cuda, save_dirname):
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_cost)
......
......@@ -110,7 +110,7 @@ def train(net_type, use_cuda, save_dirname):
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=predict, label=label)
# Test program
......
......@@ -164,7 +164,7 @@ def train(use_cuda, save_dirname=None):
label=target,
param_attr=fluid.ParamAttr(
name='crfw', learning_rate=mix_hidden_lr))
avg_cost = fluid.layers.mean(x=crf_cost)
avg_cost = fluid.layers.mean(crf_cost)
# TODO(qiao)
# check other optimizers and check why out will be NAN
......
......@@ -178,7 +178,7 @@ def train_main(use_cuda, is_sparse):
label = pd.data(
name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
cost = pd.cross_entropy(input=rnn_out, label=label)
avg_cost = pd.mean(x=cost)
avg_cost = pd.mean(cost)
optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
optimizer.minimize(avg_cost)
......
......@@ -48,7 +48,7 @@ BATCH_SIZE = 64
def loss_net(hidden, label):
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(x=loss)
avg_loss = fluid.layers.mean(loss)
acc = fluid.layers.accuracy(input=prediction, label=label)
return prediction, avg_loss, acc
......@@ -101,8 +101,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename):
avg_loss, acc = pd()
# get mean loss and acc through every devices.
avg_loss = fluid.layers.mean(x=avg_loss)
acc = fluid.layers.mean(x=acc)
avg_loss = fluid.layers.mean(avg_loss)
acc = fluid.layers.mean(acc)
else:
prediction, avg_loss, acc = net_conf(img, label)
......
......@@ -147,7 +147,7 @@ def model():
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(x=square_cost)
avg_cost = layers.mean(square_cost)
return scale_infer, avg_cost
......
......@@ -42,7 +42,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
size=class_dim,
act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
......@@ -82,7 +82,7 @@ def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,
last = fluid.layers.sequence_last_step(rnn())
prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
......@@ -119,7 +119,7 @@ def stacked_lstm_net(data,
size=class_dim,
act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
......@@ -158,8 +158,8 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None):
pd.write_output(acc)
cost, acc = pd()
cost = fluid.layers.mean(x=cost)
acc_out = fluid.layers.mean(x=acc)
cost = fluid.layers.mean(cost)
acc_out = fluid.layers.mean(acc)
prediction = None
assert save_dirname is None
......
......@@ -118,7 +118,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname):
size=dict_size,
act='softmax')
cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
return avg_cost, predict_word
word_dict = paddle.dataset.imikolov.build_dict()
......@@ -143,7 +143,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname):
]))
pd.write_output(avg_cost)
avg_cost = fluid.layers.mean(x=pd())
avg_cost = fluid.layers.mean(pd())
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_cost)
......
......@@ -24,7 +24,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
......
......@@ -114,7 +114,7 @@ else:
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimize_ops, params_grads = optimizer.minimize(avg_cost)
......
......@@ -154,7 +154,7 @@ def main():
label=target,
param_attr=fluid.ParamAttr(
name='crfw', learning_rate=mix_hidden_lr))
avg_cost = fluid.layers.mean(x=crf_cost)
avg_cost = fluid.layers.mean(crf_cost)
# TODO(qiao)
# check other optimizers and check why out will be NAN
......
......@@ -65,7 +65,7 @@ concat_embed = fluid.layers.concat(
hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid')
predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax')
cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
train_reader = paddle.batch(
......
......@@ -94,7 +94,7 @@ def main():
label = layers.data(
name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
cost = layers.cross_entropy(input=rnn_out, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
optimize_ops, params_grads = optimizer.minimize(avg_cost)
......
......@@ -37,7 +37,7 @@ conv_pool_2 = fluid.nets.simple_img_conv_pool(
predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax")
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.01)
optimize_ops, params_grads = optimizer.minimize(avg_cost)
......
......@@ -32,7 +32,7 @@ predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
optimize_ops, params_grads = optimizer.minimize(avg_cost)
......
......@@ -117,7 +117,7 @@ def model():
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(x=square_cost)
avg_cost = layers.mean(square_cost)
return avg_cost
......
......@@ -38,7 +38,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
size=class_dim,
act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
optimize_ops, params_grads = adam_optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
......
......@@ -49,7 +49,7 @@ def stacked_lstm_net(data,
size=class_dim,
act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
optimize_ops, params_grads = adam_optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
......
......@@ -30,7 +30,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
sgd_optimizer.minimize(avg_cost)
......
......@@ -117,7 +117,7 @@ else:
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
......
......@@ -100,7 +100,7 @@ def main():
label = layers.data(
name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
cost = layers.cross_entropy(input=rnn_out, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
optimizer.minimize(avg_cost)
......
......@@ -96,7 +96,7 @@ def main():
x=D(img),
label=fluid.layers.data(
name='label', shape=[1], dtype='float32'))
d_loss = fluid.layers.mean(x=d_loss)
d_loss = fluid.layers.mean(d_loss)
with fluid.program_guard(dg_program, startup_program):
noise = fluid.layers.data(
......@@ -107,7 +107,7 @@ def main():
x=D(g_img),
label=fluid.layers.fill_constant_batch_size_like(
input=noise, dtype='float32', shape=[-1, 1], value=1.0))
dg_loss = fluid.layers.mean(x=dg_loss)
dg_loss = fluid.layers.mean(dg_loss)
opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE)
......
......@@ -33,7 +33,7 @@ with fluid.program_guard(main_program=prog):
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
prog_clip = prog.clone()
prog_clip.block(0).var(hidden1.name).set_error_clip(
......
......@@ -30,7 +30,7 @@ with fluid.program_guard(main_program=prog):
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
prog_clip = prog.clone()
......
......@@ -56,7 +56,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
prob = layers.merge_lod_tensor(
in_true=true_out, in_false=false_out, mask=cond, x=image)
loss = layers.cross_entropy(input=prob, label=label)
avg_loss = layers.mean(x=loss)
avg_loss = layers.mean(loss)
optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
optimizer.minimize(avg_loss, startup_prog)
......@@ -113,7 +113,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
prob = ie()
loss = layers.cross_entropy(input=prob[0], label=label)
avg_loss = layers.mean(x=loss)
avg_loss = layers.mean(loss)
optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
optimizer.minimize(avg_loss, startup_prog)
......
......@@ -49,15 +49,15 @@ class TestArrayReadWrite(unittest.TestCase):
i = layers.increment(x=i)
a2 = layers.array_read(array=arr, i=i)
mean_a0 = layers.mean(x=a0)
mean_a1 = layers.mean(x=a1)
mean_a2 = layers.mean(x=a2)
mean_a0 = layers.mean(a0)
mean_a1 = layers.mean(a1)
mean_a2 = layers.mean(a2)
a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])
mean_x0 = layers.mean(x=x[0])
mean_x1 = layers.mean(x=x[1])
mean_x2 = layers.mean(x=x[2])
mean_x0 = layers.mean(x[0])
mean_x1 = layers.mean(x[1])
mean_x2 = layers.mean(x[2])
x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])
......
......@@ -26,7 +26,7 @@ class TestCalcGradient(unittest.TestCase):
x = layers.create_parameter(dtype="float32", shape=[5, 10])
y = layers.create_parameter(dtype="float32", shape=[10, 8])
mul_out = layers.mul(x=x, y=y)
mean_out = layers.mean(x=mul_out)
mean_out = layers.mean(mul_out)
a = calc_gradient(mean_out, mul_out)
b = calc_gradient(mean_out, x)
place = fluid.CPUPlace()
......
......@@ -39,7 +39,7 @@ class ConditionalBlock(unittest.TestCase):
outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
print outs
loss = layers.mean(x=out)
loss = layers.mean(out)
append_backward(loss=loss)
outs = exe.run(
feed={'X': x},
......
......@@ -81,7 +81,7 @@ class TestDynRNN(unittest.TestCase):
logits = fluid.layers.fc(input=last, size=1, act=None)
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(x=loss)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.SGD(1e-4)
sgd.minimize(loss=loss)
cpu = fluid.CPUPlace()
......@@ -119,7 +119,7 @@ class TestDynRNN(unittest.TestCase):
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(x=loss)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.Adam(1e-3)
sgd.minimize(loss=loss)
......
......@@ -272,7 +272,7 @@ class TestSimpleMul(SeedFixedTestCase):
out = rnn()
out = fluid.layers.sequence_pool(out, pool_type='last')
loss = fluid.layers.mean(x=out)
loss = fluid.layers.mean(out)
fluid.backward.append_backward(loss)
cpu = fluid.CPUPlace()
......@@ -348,7 +348,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase):
out = rnn()
last = fluid.layers.sequence_pool(input=out, pool_type='last')
loss = fluid.layers.mean(x=last)
loss = fluid.layers.mean(last)
fluid.backward.append_backward(loss)
cpu = fluid.CPUPlace()
......
......@@ -125,7 +125,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
return static_input_step_outs
last = fluid.layers.sequence_pool(input=rnn(), pool_type='last')
loss = fluid.layers.mean(x=last)
loss = fluid.layers.mean(last)
append_backward(loss)
static_input_grad = self._program.global_block().var(
framework.grad_var_name('static_input_tensor'))
......
......@@ -38,7 +38,7 @@ class TestBook(unittest.TestCase):
y_predict = layers.fc(input=x, size=1, act=None)
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
sgd_optimizer.minimize(avg_cost, init_program)
......
......@@ -30,7 +30,7 @@ class TestBook(unittest.TestCase):
y_predict = layers.fc(input=x, size=1, act=None)
y = layers.data(name='y', shape=[1], dtype='float32')
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
program.append_backward(avg_cost)
......@@ -49,7 +49,7 @@ class TestBook(unittest.TestCase):
act='softmax',
param_attr=["sftmax.w1", "sftmax.w2"])
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
print(str(program))
......@@ -92,7 +92,7 @@ class TestBook(unittest.TestCase):
predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
program.append_backward(avg_cost)
......@@ -140,7 +140,7 @@ class TestBook(unittest.TestCase):
size=dict_size,
act='softmax')
cost = layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
print(str(program))
......@@ -287,7 +287,7 @@ class TestBook(unittest.TestCase):
num_total_classes=dict_size,
param_attr='nce.w',
bias_attr='nce.b')
avg_loss = layers.mean(x=loss)
avg_loss = layers.mean(loss)
self.assertIsNotNone(avg_loss)
print(str(default_main_program()))
......
......@@ -182,7 +182,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
array = layers.lod_tensor_to_array(x, table)
result = layers.array_to_lod_tensor(array, table)
mean = layers.mean(x=result)
mean = layers.mean(result)
append_backward(mean)
......
......@@ -29,7 +29,7 @@ class TestControlFlowGraph(unittest.TestCase):
y_predict = layers.fc(input=x, size=1, act=None)
y = layers.data(name='y', shape=[1], dtype='float32')
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
avg_cost = layers.mean(cost)
opt = optimizer.SGD(learning_rate=0.001)
opt = opt.minimize(avg_cost)
......
......@@ -127,7 +127,7 @@ class BaseParallelForTest(unittest.TestCase):
data = next(generator)
loss = generator.send(data)
self.assertIsNotNone(loss)
avg_loss = fluid.layers.mean(x=loss)
avg_loss = fluid.layers.mean(loss)
fluid.backward.append_backward(loss=avg_loss)
exe = fluid.Executor(place)
......@@ -170,7 +170,7 @@ class ParallelOpTest(BaseParallelForTest):
x = fluid.layers.data(shape=[784], dtype='float32', name='img')
x = yield x
hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
loss = fluid.layers.mean(x=hidden)
loss = fluid.layers.mean(hidden)
yield loss
def test_simple_fc(self):
......@@ -200,7 +200,7 @@ class ParallelOpTestMultipleInput(BaseParallelForTest):
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')
hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')
loss = fluid.layers.mean(x=hidden3)
loss = fluid.layers.mean(hidden3)
yield loss
def test_simple_fc(self):
......
......@@ -35,7 +35,7 @@ class TestPrintOpCPU(unittest.TestCase):
x.stop_gradient = False
printed = layers.Print(input=x, **kargs)
if only_forward: return printed
loss = layers.mean(x=printed)
loss = layers.mean(printed)
append_backward(loss=loss)
return loss
......
......@@ -54,7 +54,7 @@ class TestProfiler(unittest.TestCase):
predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
......
......@@ -127,7 +127,7 @@ class RecurrentOpTest1(unittest.TestCase):
self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)
self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
self.output = layers.mean(self.create_rnn_op(), **self.p_info)
def create_rnn_op(self):
x = layers.data(
......@@ -261,7 +261,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)
self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
self.output = layers.mean(self.create_rnn_op(), **self.p_info)
def create_rnn_op(self):
x = layers.data(
......@@ -360,7 +360,7 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3(
self.input_shape, self.output_shape)
self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
self.output = layers.mean(self.create_rnn_op(), **self.p_info)
def create_rnn_op(self):
x = layers.data(
......@@ -444,7 +444,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape,
self.output_shape)
self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
self.output = layers.mean(self.create_rnn_op(), **self.p_info)
print self.main_program
def create_rnn_op(self):
......
......@@ -22,7 +22,7 @@ class TestRegistry(unittest.TestCase):
@decorators.prog_scope()
def test_registry_layer(self):
x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
output = fluid.layers.mean(x=x)
output = fluid.layers.mean(x)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
......
......@@ -39,7 +39,7 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
i = layers.increment(x=i)
i.stop_gradient = True
self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
mem3_mean = layers.mean(x=self.mem3)
mem3_mean = layers.mean(self.mem3)
append_backward(loss=mem3_mean)
self.x_grad = self.main_program.global_block().var('x@GRAD')
......
......@@ -145,7 +145,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
mean = layers.mean(x=out)
mean = layers.mean(out)
append_backward(mean)
......
......@@ -58,7 +58,7 @@ class TestWhileOp(unittest.TestCase):
layers.less_than(x=i, y=array_len, cond=cond)
sum_result = layers.array_read(array=mem_array, i=i)
loss = layers.mean(x=sum_result)
loss = layers.mean(sum_result)
append_backward(loss)
......