From 3959023099f25c590ec72f701976c7b4e1233174 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 16:28:13 +0800 Subject: [PATCH] Enhance layer_function_generator * Generated functions can take `*args` as inputs. --- .../v2/fluid/layers/layer_function_generator.py | 5 ++++- python/paddle/v2/fluid/layers/tensor.py | 4 ++-- .../v2/fluid/tests/book/notest_rnn_encoder_decoer.py | 2 +- python/paddle/v2/fluid/tests/book/test_fit_a_line.py | 2 +- .../v2/fluid/tests/book/test_image_classification.py | 2 +- .../v2/fluid/tests/book/test_label_semantic_roles.py | 2 +- .../v2/fluid/tests/book/test_machine_translation.py | 2 +- .../v2/fluid/tests/book/test_recognize_digits.py | 6 +++--- .../v2/fluid/tests/book/test_recommender_system.py | 2 +- .../v2/fluid/tests/book/test_understand_sentiment.py | 10 +++++----- python/paddle/v2/fluid/tests/book/test_word2vec.py | 4 ++-- .../tests/book_distribute/notest_dist_fit_a_line.py | 2 +- .../notest_dist_image_classification.py | 2 +- .../notest_dist_label_semantic_roles.py | 2 +- .../tests/book_distribute/notest_dist_word2vec.py | 2 +- .../book_distribute/notest_machine_translation.py | 2 +- .../notest_recognize_digits_conv_dist.py | 2 +- .../notest_recognize_digits_mlp_dist.py | 2 +- .../notest_recommender_system_dist.py | 2 +- .../notest_understand_sentiment_conv_dist.py | 2 +- .../notest_understand_sentiment_dynamic_lstm.py | 2 +- .../test_memopt_fit_a_line.py | 2 +- .../test_memopt_image_classification_train.py | 2 +- .../test_memopt_machine_translation.py | 2 +- python/paddle/v2/fluid/tests/demo/fc_gan.py | 4 ++-- python/paddle/v2/fluid/tests/test_error_clip.py | 2 +- python/paddle/v2/fluid/tests/test_gradient_clip.py | 2 +- .../paddle/v2/fluid/tests/test_mnist_if_else_op.py | 4 ++-- .../tests/unittests/test_array_read_write_op.py | 12 ++++++------ .../v2/fluid/tests/unittests/test_calc_gradient.py | 2 +- .../fluid/tests/unittests/test_conditional_block.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_dyn_rnn.py | 4 ++-- .../tests/unittests/test_dynrnn_gradient_check.py | 4 ++-- .../tests/unittests/test_dynrnn_static_input.py | 2 +- .../fluid/tests/unittests/test_inference_model_io.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_layers.py | 10 +++++----- .../tests/unittests/test_lod_tensor_array_ops.py | 2 +- .../unittests/test_memory_optimization_transpiler.py | 2 +- .../v2/fluid/tests/unittests/test_parallel_op.py | 6 +++--- .../paddle/v2/fluid/tests/unittests/test_print_op.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_profiler.py | 2 +- .../v2/fluid/tests/unittests/test_recurrent_op.py | 8 ++++---- .../paddle/v2/fluid/tests/unittests/test_registry.py | 2 +- .../fluid/tests/unittests/test_shrink_rnn_memory.py | 2 +- .../unittests/test_split_and_merge_lod_tensor_op.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_while_op.py | 2 +- 46 files changed, 75 insertions(+), 72 deletions(-) diff --git a/python/paddle/v2/fluid/layers/layer_function_generator.py b/python/paddle/v2/fluid/layers/layer_function_generator.py index 88c9ae31b7..16a401dc7b 100644 --- a/python/paddle/v2/fluid/layers/layer_function_generator.py +++ b/python/paddle/v2/fluid/layers/layer_function_generator.py @@ -155,7 +155,7 @@ def generate_layer_fn(op_type): return dtype - def func(**kwargs): + def func(*args, **kwargs): helper = LayerHelper(op_type, **kwargs) dtype = infer_and_check_dtype(op_proto, **kwargs) @@ -166,6 +166,9 @@ def generate_layer_fn(op_type): val = kwargs.pop(name, []) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] + if 
len(val) == 0 and len(args) != 0: + val = args[0] + args = args[1:] inputs[ipt.name] = val outputs = dict() diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 97e8f082cf..8100e8f034 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -160,8 +160,8 @@ def sums(input, out=None): a0 = layers.array_read(array=tmp, i=i) i = layers.increment(x=i) a1 = layers.array_read(array=tmp, i=i) - mean_a0 = layers.mean(x=a0) - mean_a1 = layers.mean(x=a1) + mean_a0 = layers.mean(a0) + mean_a1 = layers.mean(a1) a_sum = layers.sums(input=[mean_a0, mean_a1]) """ helper = LayerHelper('sum', **locals()) diff --git a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py index c7db70f1b1..0054bb6bec 100644 --- a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -147,7 +147,7 @@ def seq_to_seq_net(): label = fluid.layers.data( name='label_sequence', shape=[1], dtype='int64', lod_level=1) cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, prediction diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index a66c2c3c2f..77cffd4de9 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -29,7 +29,7 @@ def train(use_cuda, save_dirname): y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py index 734ab3e4fb..b99a4285aa 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification.py @@ -110,7 +110,7 @@ def train(net_type, use_cuda, save_dirname): predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) # Test program diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index b790246ec1..e513a658ff 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -164,7 +164,7 @@ def train(use_cuda, save_dirname=None): label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py index d3405a9601..ee6a5d61df 100644 --- a/python/paddle/v2/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py @@ -178,7 +178,7 @@ def train_main(use_cuda, 
is_sparse): label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) - avg_cost = pd.mean(x=cost) + avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 2462d425e1..aeeea1177e 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -48,7 +48,7 @@ BATCH_SIZE = 64 def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(x=loss) + avg_loss = fluid.layers.mean(loss) acc = fluid.layers.accuracy(input=prediction, label=label) return prediction, avg_loss, acc @@ -101,8 +101,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): avg_loss, acc = pd() # get mean loss and acc through every devices. - avg_loss = fluid.layers.mean(x=avg_loss) - acc = fluid.layers.mean(x=acc) + avg_loss = fluid.layers.mean(avg_loss) + acc = fluid.layers.mean(acc) else: prediction, avg_loss, acc = net_conf(img, label) diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index 1a7d8d57ff..a5adc3507b 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -147,7 +147,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return scale_infer, avg_cost diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 61f46b51c4..cdd233a5b6 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -42,7 +42,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -82,7 +82,7 @@ def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32, last = fluid.layers.sequence_last_step(rnn()) prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -119,7 +119,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -158,8 +158,8 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): pd.write_output(acc) cost, acc = pd() - cost = fluid.layers.mean(x=cost) - acc_out = fluid.layers.mean(x=acc) 
+ cost = fluid.layers.mean(cost) + acc_out = fluid.layers.mean(acc) prediction = None assert save_dirname is None diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 9bd8f90c5e..ac23bd7284 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -118,7 +118,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, predict_word word_dict = paddle.dataset.imikolov.build_dict() @@ -143,7 +143,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): ])) pd.write_output(avg_cost) - avg_cost = fluid.layers.mean(x=pd()) + avg_cost = fluid.layers.mean(pd()) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py index c443c4e0b7..164327d8f0 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -24,7 +24,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py index 298ecfc386..6ba06a6038 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -114,7 +114,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 1210bf1d84..fa4bf33cea 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -154,7 +154,7 @@ def main(): label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py index 0d5ad98850..aff4c53ebc 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -65,7 +65,7 @@ concat_embed = fluid.layers.concat( hidden1 = fluid.layers.fc(input=concat_embed, 
size=HIDDEN_SIZE, act='sigmoid') predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py index 15d2d40979..5406bd9113 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py @@ -94,7 +94,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index 1c1fffc589..f6623099cb 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -37,7 +37,7 @@ conv_pool_2 = fluid.nets.simple_img_conv_pool( predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.01) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py index c442ada6e3..f2d32cb99d 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py @@ -32,7 +32,7 @@ predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py index 363c7102c7..907b09a38b 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py @@ -117,7 +117,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return avg_cost diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index 
c5c0856c31..f95b4a9a02 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -38,7 +38,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py index 99e2c2bbac..5212319435 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -49,7 +49,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index 944f8af086..04ab2d1d07 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -30,7 +30,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index a556904107..307e6035f4 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -117,7 +117,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) opts = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index 4c1eae861b..3de46e8c27 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -100,7 +100,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = 
layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/demo/fc_gan.py b/python/paddle/v2/fluid/tests/demo/fc_gan.py index 67921db04a..a0d3721ea4 100644 --- a/python/paddle/v2/fluid/tests/demo/fc_gan.py +++ b/python/paddle/v2/fluid/tests/demo/fc_gan.py @@ -96,7 +96,7 @@ def main(): x=D(img), label=fluid.layers.data( name='label', shape=[1], dtype='float32')) - d_loss = fluid.layers.mean(x=d_loss) + d_loss = fluid.layers.mean(d_loss) with fluid.program_guard(dg_program, startup_program): noise = fluid.layers.data( @@ -107,7 +107,7 @@ def main(): x=D(g_img), label=fluid.layers.fill_constant_batch_size_like( input=noise, dtype='float32', shape=[-1, 1], value=1.0)) - dg_loss = fluid.layers.mean(x=dg_loss) + dg_loss = fluid.layers.mean(dg_loss) opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) diff --git a/python/paddle/v2/fluid/tests/test_error_clip.py b/python/paddle/v2/fluid/tests/test_error_clip.py index d577d0014d..99b69c1625 100644 --- a/python/paddle/v2/fluid/tests/test_error_clip.py +++ b/python/paddle/v2/fluid/tests/test_error_clip.py @@ -33,7 +33,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() prog_clip.block(0).var(hidden1.name).set_error_clip( diff --git a/python/paddle/v2/fluid/tests/test_gradient_clip.py b/python/paddle/v2/fluid/tests/test_gradient_clip.py index 792262df84..c20863ddb2 100644 --- a/python/paddle/v2/fluid/tests/test_gradient_clip.py +++ b/python/paddle/v2/fluid/tests/test_gradient_clip.py @@ -30,7 +30,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() diff --git a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py index 75a651cf27..e5a1406b93 100644 --- a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py @@ -56,7 +56,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = layers.merge_lod_tensor( in_true=true_out, in_false=false_out, mask=cond, x=image) loss = layers.cross_entropy(input=prob, label=label) - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) @@ -113,7 +113,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = ie() loss = layers.cross_entropy(input=prob[0], label=label) - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) diff --git a/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py index 8917b9b906..e04f682ece 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py @@ -49,15 +49,15 @@ class TestArrayReadWrite(unittest.TestCase): i = layers.increment(x=i) a2 = 
layers.array_read(array=arr, i=i) - mean_a0 = layers.mean(x=a0) - mean_a1 = layers.mean(x=a1) - mean_a2 = layers.mean(x=a2) + mean_a0 = layers.mean(a0) + mean_a1 = layers.mean(a1) + mean_a2 = layers.mean(a2) a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2]) - mean_x0 = layers.mean(x=x[0]) - mean_x1 = layers.mean(x=x[1]) - mean_x2 = layers.mean(x=x[2]) + mean_x0 = layers.mean(x[0]) + mean_x1 = layers.mean(x[1]) + mean_x2 = layers.mean(x[2]) x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2]) diff --git a/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py index 1b38dcf343..1b0de31ae0 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py @@ -26,7 +26,7 @@ class TestCalcGradient(unittest.TestCase): x = layers.create_parameter(dtype="float32", shape=[5, 10]) y = layers.create_parameter(dtype="float32", shape=[10, 8]) mul_out = layers.mul(x=x, y=y) - mean_out = layers.mean(x=mul_out) + mean_out = layers.mean(mul_out) a = calc_gradient(mean_out, mul_out) b = calc_gradient(mean_out, x) place = fluid.CPUPlace() diff --git a/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py b/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py index 58ac267203..f605e13d21 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py @@ -39,7 +39,7 @@ class ConditionalBlock(unittest.TestCase): outs = exe.run(feed={'X': x}, fetch_list=[out])[0] print outs - loss = layers.mean(x=out) + loss = layers.mean(out) append_backward(loss=loss) outs = exe.run( feed={'X': x}, diff --git a/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py index 1571572fc6..23a1555208 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py @@ -81,7 +81,7 @@ class TestDynRNN(unittest.TestCase): logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) - loss = fluid.layers.mean(x=loss) + loss = fluid.layers.mean(loss) sgd = fluid.optimizer.SGD(1e-4) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() @@ -119,7 +119,7 @@ class TestDynRNN(unittest.TestCase): label = fluid.layers.data(name='label', shape=[1], dtype='float32') loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) - loss = fluid.layers.mean(x=loss) + loss = fluid.layers.mean(loss) sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) diff --git a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py index 8b01ec730a..182b025be5 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -272,7 +272,7 @@ class TestSimpleMul(SeedFixedTestCase): out = rnn() out = fluid.layers.sequence_pool(out, pool_type='last') - loss = fluid.layers.mean(x=out) + loss = fluid.layers.mean(out) fluid.backward.append_backward(loss) cpu = fluid.CPUPlace() @@ -348,7 +348,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): out = rnn() last = fluid.layers.sequence_pool(input=out, pool_type='last') - loss = fluid.layers.mean(x=last) + loss = fluid.layers.mean(last) fluid.backward.append_backward(loss) cpu = 
fluid.CPUPlace() diff --git a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py index d2f05dcd14..b21ac8e800 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py @@ -125,7 +125,7 @@ class TestDyRnnStaticInput(unittest.TestCase): return static_input_step_outs last = fluid.layers.sequence_pool(input=rnn(), pool_type='last') - loss = fluid.layers.mean(x=last) + loss = fluid.layers.mean(last) append_backward(loss) static_input_grad = self._program.global_block().var( framework.grad_var_name('static_input_tensor')) diff --git a/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py index e381312ccc..62abe99aa2 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py @@ -38,7 +38,7 @@ class TestBook(unittest.TestCase): y_predict = layers.fc(input=x, size=1, act=None) cost = layers.square_error_cost(input=y_predict, label=y) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) sgd_optimizer.minimize(avg_cost, init_program) diff --git a/python/paddle/v2/fluid/tests/unittests/test_layers.py b/python/paddle/v2/fluid/tests/unittests/test_layers.py index e757598bba..149ac347ce 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_layers.py +++ b/python/paddle/v2/fluid/tests/unittests/test_layers.py @@ -30,7 +30,7 @@ class TestBook(unittest.TestCase): y_predict = layers.fc(input=x, size=1, act=None) y = layers.data(name='y', shape=[1], dtype='float32') cost = layers.square_error_cost(input=y_predict, label=y) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) program.append_backward(avg_cost) @@ -49,7 +49,7 @@ class TestBook(unittest.TestCase): act='softmax', param_attr=["sftmax.w1", "sftmax.w2"]) cost = layers.cross_entropy(input=predict, label=label) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) @@ -92,7 +92,7 @@ class TestBook(unittest.TestCase): predict = layers.fc(input=conv_pool_2, size=10, act="softmax") cost = layers.cross_entropy(input=predict, label=label) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) program.append_backward(avg_cost) @@ -140,7 +140,7 @@ class TestBook(unittest.TestCase): size=dict_size, act='softmax') cost = layers.cross_entropy(input=predict_word, label=next_word) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) @@ -287,7 +287,7 @@ class TestBook(unittest.TestCase): num_total_classes=dict_size, param_attr='nce.w', bias_attr='nce.b') - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) self.assertIsNotNone(avg_loss) print(str(default_main_program())) diff --git a/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py index ebc0a2f714..8c59bbb407 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -182,7 +182,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): array = layers.lod_tensor_to_array(x, table) result = layers.array_to_lod_tensor(array, table) - 
mean = layers.mean(x=result) + mean = layers.mean(result) append_backward(mean) diff --git a/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py b/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py index a276db581e..9d5f90c627 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py @@ -29,7 +29,7 @@ class TestControlFlowGraph(unittest.TestCase): y_predict = layers.fc(input=x, size=1, act=None) y = layers.data(name='y', shape=[1], dtype='float32') cost = layers.square_error_cost(input=y_predict, label=y) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) opt = optimizer.SGD(learning_rate=0.001) opt = opt.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py index d65752608b..8ace41020e 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py @@ -127,7 +127,7 @@ class BaseParallelForTest(unittest.TestCase): data = next(generator) loss = generator.send(data) self.assertIsNotNone(loss) - avg_loss = fluid.layers.mean(x=loss) + avg_loss = fluid.layers.mean(loss) fluid.backward.append_backward(loss=avg_loss) exe = fluid.Executor(place) @@ -170,7 +170,7 @@ class ParallelOpTest(BaseParallelForTest): x = fluid.layers.data(shape=[784], dtype='float32', name='img') x = yield x hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') - loss = fluid.layers.mean(x=hidden) + loss = fluid.layers.mean(hidden) yield loss def test_simple_fc(self): @@ -200,7 +200,7 @@ class ParallelOpTestMultipleInput(BaseParallelForTest): hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w') hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w') - loss = fluid.layers.mean(x=hidden3) + loss = fluid.layers.mean(hidden3) yield loss def test_simple_fc(self): diff --git a/python/paddle/v2/fluid/tests/unittests/test_print_op.py b/python/paddle/v2/fluid/tests/unittests/test_print_op.py index 1e49ce994b..d11e3aeddf 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_print_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_print_op.py @@ -35,7 +35,7 @@ class TestPrintOpCPU(unittest.TestCase): x.stop_gradient = False printed = layers.Print(input=x, **kargs) if only_forward: return printed - loss = layers.mean(x=printed) + loss = layers.mean(printed) append_backward(loss=loss) return loss diff --git a/python/paddle/v2/fluid/tests/unittests/test_profiler.py b/python/paddle/v2/fluid/tests/unittests/test_profiler.py index 62bfb2b8e2..b4b8a58286 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/v2/fluid/tests/unittests/test_profiler.py @@ -54,7 +54,7 @@ class TestProfiler(unittest.TestCase): predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.evaluator.Accuracy(input=predict, label=label) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) diff --git a/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py index 
177d8fc65f..0e747936fd 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py @@ -127,7 +127,7 @@ class RecurrentOpTest1(unittest.TestCase): self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape) - self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(self.create_rnn_op(), **self.p_info) def create_rnn_op(self): x = layers.data( @@ -261,7 +261,7 @@ class RecurrentOpTest2(RecurrentOpTest1): self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape) - self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(self.create_rnn_op(), **self.p_info) def create_rnn_op(self): x = layers.data( @@ -360,7 +360,7 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3( self.input_shape, self.output_shape) - self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(self.create_rnn_op(), **self.p_info) def create_rnn_op(self): x = layers.data( @@ -444,7 +444,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape, self.output_shape) - self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(self.create_rnn_op(), **self.p_info) print self.main_program def create_rnn_op(self): diff --git a/python/paddle/v2/fluid/tests/unittests/test_registry.py b/python/paddle/v2/fluid/tests/unittests/test_registry.py index 82527a6ec7..b0ec218ab3 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_registry.py +++ b/python/paddle/v2/fluid/tests/unittests/test_registry.py @@ -22,7 +22,7 @@ class TestRegistry(unittest.TestCase): @decorators.prog_scope() def test_registry_layer(self): x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32') - output = fluid.layers.mean(x=x) + output = fluid.layers.mean(x) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py index 48874ba8a5..c991fbbaa2 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -39,7 +39,7 @@ class TestShrinkRNNMemoryBase(unittest.TestCase): i = layers.increment(x=i) i.stop_gradient = True self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table) - mem3_mean = layers.mean(x=self.mem3) + mem3_mean = layers.mean(self.mem3) append_backward(loss=mem3_mean) self.x_grad = self.main_program.global_block().var('x@GRAD') diff --git a/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py index 48e6756a86..8cfbd7881a 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py @@ -145,7 +145,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): input=x, mask=y, level=level) out = layers.merge_lod_tensor( in_true=out_true, in_false=out_false, mask=y, x=x, level=level) - mean = layers.mean(x=out) + mean = layers.mean(out) 
append_backward(mean) diff --git a/python/paddle/v2/fluid/tests/unittests/test_while_op.py b/python/paddle/v2/fluid/tests/unittests/test_while_op.py index 3fa1d5e0ed..3d2a9faf32 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_while_op.py @@ -58,7 +58,7 @@ class TestWhileOp(unittest.TestCase): layers.less_than(x=i, y=array_len, cond=cond) sum_result = layers.array_read(array=mem_array, i=i) - loss = layers.mean(x=sum_result) + loss = layers.mean(sum_result) append_backward(loss) -- GitLab
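
The layer_function_generator change at the top of this patch is what enables every
other hunk: functions produced by generate_layer_fn can now take positional
arguments, so call sites may write `fluid.layers.mean(cost)` instead of
`fluid.layers.mean(x=cost)`. Positional arguments are consumed, in order, by each
declared input slot whose keyword is absent. Below is a minimal, self-contained
sketch of that dispatch idea, assuming a plain list of input names; make_layer_fn
and input_names are illustrative stand-ins, not Paddle's real API, which builds
the wrapper from an OpProto and a LayerHelper.

    def make_layer_fn(op_type, input_names):
        """Build a layer-like function whose inputs may be passed positionally."""
        def func(*args, **kwargs):
            args = list(args)
            inputs = {}
            for name in input_names:
                # Keyword form keeps its pre-patch semantics.
                val = kwargs.pop(name, [])
                if not isinstance(val, (list, tuple)):
                    val = [val]
                # New behaviour from the patch: when the keyword is absent,
                # consume the next positional argument for this input slot
                # (the patch assigns args[0] without list-wrapping it).
                if len(val) == 0 and len(args) != 0:
                    val = args.pop(0)
                inputs[name] = val
            return {'type': op_type, 'inputs': inputs}
        return func

    mean = make_layer_fn('mean', ['x'])
    print(mean('cost_var'))    # positional: {'type': 'mean', 'inputs': {'x': 'cost_var'}}
    print(mean(x='cost_var'))  # keyword:    {'type': 'mean', 'inputs': {'x': ['cost_var']}}

Because the keyword path is untouched, the existing mean(x=...) call sites would
have kept working; the sweep across the tests above is a style cleanup made
possible by the new positional dispatch, not a required migration.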