diff --git a/.gitignore b/.gitignore index fe0d13f4d9eab2c2a8e7001c9ecb69cce1333af1..2badc3bdaa52f2608183fa34393719be66630654 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,6 @@ third_party/ cmake-build-* # generated while compiling -python/paddle/v2/fluid/core.so paddle/pybind/pybind.h CMakeFiles cmake_install.cmake diff --git a/doc/api/v2/fluid/data_feeder.rst b/doc/api/fluid/data_feeder.rst similarity index 100% rename from doc/api/v2/fluid/data_feeder.rst rename to doc/api/fluid/data_feeder.rst diff --git a/doc/api/v2/fluid/evaluator.rst b/doc/api/fluid/evaluator.rst similarity index 100% rename from doc/api/v2/fluid/evaluator.rst rename to doc/api/fluid/evaluator.rst diff --git a/doc/api/v2/fluid/executor.rst b/doc/api/fluid/executor.rst similarity index 100% rename from doc/api/v2/fluid/executor.rst rename to doc/api/fluid/executor.rst diff --git a/doc/api/v2/fluid/gen_doc.py b/doc/api/fluid/gen_doc.py similarity index 100% rename from doc/api/v2/fluid/gen_doc.py rename to doc/api/fluid/gen_doc.py diff --git a/doc/api/v2/fluid/gen_doc.sh b/doc/api/fluid/gen_doc.sh similarity index 100% rename from doc/api/v2/fluid/gen_doc.sh rename to doc/api/fluid/gen_doc.sh diff --git a/doc/api/fluid/index.rst b/doc/api/fluid/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0710d8b19956eb235890fdb2a2d764084416aa5 --- /dev/null +++ b/doc/api/fluid/index.rst @@ -0,0 +1,18 @@ +====================== +Fluid +====================== + +.. toctree:: + :maxdepth: 1 + + layers.rst + data_feeder.rst + executor.rst + initializer.rst + evaluator.rst + nets.rst + optimizer.rst + param_attr.rst + profiler.rst + regularizer.rst + io.rst diff --git a/doc/api/v2/fluid/initializer.rst b/doc/api/fluid/initializer.rst similarity index 100% rename from doc/api/v2/fluid/initializer.rst rename to doc/api/fluid/initializer.rst diff --git a/doc/api/v2/fluid/io.rst b/doc/api/fluid/io.rst similarity index 100% rename from doc/api/v2/fluid/io.rst rename to doc/api/fluid/io.rst diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/fluid/layers.rst similarity index 100% rename from doc/api/v2/fluid/layers.rst rename to doc/api/fluid/layers.rst diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/fluid/nets.rst similarity index 100% rename from doc/api/v2/fluid/nets.rst rename to doc/api/fluid/nets.rst diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/fluid/optimizer.rst similarity index 100% rename from doc/api/v2/fluid/optimizer.rst rename to doc/api/fluid/optimizer.rst diff --git a/doc/api/v2/fluid/param_attr.rst b/doc/api/fluid/param_attr.rst similarity index 100% rename from doc/api/v2/fluid/param_attr.rst rename to doc/api/fluid/param_attr.rst diff --git a/doc/api/v2/fluid/profiler.rst b/doc/api/fluid/profiler.rst similarity index 100% rename from doc/api/v2/fluid/profiler.rst rename to doc/api/fluid/profiler.rst diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/fluid/regularizer.rst similarity index 100% rename from doc/api/v2/fluid/regularizer.rst rename to doc/api/fluid/regularizer.rst diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 77337982be1b9b90d1692ab8b83177ebfa8208b2..fc8dbd07eba248942c03c2b609cfc8d8712ed0c7 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -8,4 +8,4 @@ API v2/model_configs.rst v2/data.rst v2/run_logic.rst - v2/fluid.rst + fluid/index.rst diff --git a/doc/api/v2/fluid.rst b/doc/api/v2/fluid.rst deleted file mode 100644 index 5f15cad2b530dfb3702357b3c26885ac2a7b7beb..0000000000000000000000000000000000000000 --- 
a/doc/api/v2/fluid.rst +++ /dev/null @@ -1,18 +0,0 @@ -====================== -Fluid -====================== - -.. toctree:: - :maxdepth: 1 - - fluid/layers.rst - fluid/data_feeder.rst - fluid/executor.rst - fluid/initializer.rst - fluid/evaluator.rst - fluid/nets.rst - fluid/optimizer.rst - fluid/param_attr.rst - fluid/profiler.rst - fluid/regularizer.rst - fluid/io.rst diff --git a/doc/design/concurrent_programming.md b/doc/design/concurrent_programming.md index afc65e831d58ff427663806e56294292ccbef85b..f022e67fd3a048cd7e53c91d9a1fd0506487b665 100644 --- a/doc/design/concurrent_programming.md +++ b/doc/design/concurrent_programming.md @@ -12,7 +12,7 @@ The following table compares concepts in Fluid and Go | Go | Fluid | |----|-------| -|user-defined functions | [layers](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid) | +|user-defined functions | [layers](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid) | | control-flow and built-in functions | [intrinsics/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) | | goroutines, channels | [class ThreadPool](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework/thread_pool.h) | | runtime | [class Executor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h) | diff --git a/doc/design/fluid.md b/doc/design/fluid.md index 2acc168007d25a083f588b48f84e12e29baf4f47..f78fa8c1914124f33b9730f918c8887ced4f8d9d 100644 --- a/doc/design/fluid.md +++ b/doc/design/fluid.md @@ -89,7 +89,7 @@ with train_loop.block(): h[t] = the_step(input[t]) ``` -An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44). +An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/bde090a97564b9c61a6aaa38b72ccc4889d102d9/python/paddle/fluid/tests/unittests/test_while_op.py#L50-L58). From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. diff --git a/doc/design/memory_optimization.md b/doc/design/memory_optimization.md index 1f68cef4cc28cd005acbeaa5c03cc0d84a83939c..285464ada728d8f7a086a26beca6cfa4418e98e4 100644 --- a/doc/design/memory_optimization.md +++ b/doc/design/memory_optimization.md @@ -101,7 +101,7 @@ In-place is a built-in attribute of an operator. Since we treat in-place and oth #### construct control flow graph -Following is the ProgramDesc protobuf of [machine translation](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/tests/book/test_machine_translation.py) example. +The following is the ProgramDesc protobuf of the [machine translation](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_machine_translation.py) example. - Block0: diff --git a/doc/howto/capi/index_cn.rst b/doc/howto/capi/index_cn.rst index e589a6d346a1e23a4eed9801e02727c80782ae8b..7f100717983f5e950b801e6b05ee48bfff273c62 100644 --- a/doc/howto/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -1,6 +1,23 @@ C-API Inference Library ================== +After a neural network model has been trained, the next step is to use it for inference. Inference is the process of preparing input data, running it through the model, and obtaining the prediction results. + +Compared with model training, inference has the following characteristics: + +#. Inference does not need the backward-propagation and parameter-update parts of the training process. +#. Inference does not need labels. +#. Inference often has to be integrated with the user's own system. + +Because of these characteristics, the inference SDK must be designed separately, with the following properties: + +#. The inference SDK excludes backward propagation and parameter updates, which keeps the SDK small. +#. The inference SDK provides a concise, easy-to-use interface. +#. Since the input data may come in many different structures, the SDK wraps the input format clearly and concisely. +#. To stay compatible with user systems, the SDK exposes interfaces that conform to the C standard. + +PaddlePaddle provides the C-API to address these needs. The following guides cover its usage: + .. toctree:: :maxdepth: 1 diff --git a/doc/howto/cluster/fluid_cluster_train_en.md b/doc/howto/cluster/fluid_cluster_train_en.md index ae825d9a517c7e9005d4e32f8f34b3f6a79be0c9..b4465e8269c2e1603c02404ea33f8c4572e76442 100644 --- a/doc/howto/cluster/fluid_cluster_train_en.md +++ b/doc/howto/cluster/fluid_cluster_train_en.md @@ -32,7 +32,7 @@ The non-cluster version of this demo with fluid API is as follows: ``` python import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) @@ -125,11 +125,11 @@ for pass_id in range(100): ### E2E demo -Please find the complete demo from [here](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py). +Please find the complete demo from [here](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py). First `cd` into the folder that contains the `python` files. In this case: ```bash -cd /paddle/python/paddle/v2/fluid/tests/book_distribute +cd /paddle/python/paddle/fluid/tests/book_distribute ``` In parameter server node run the following in the command line: diff --git a/doc/howto/optimization/cpu_profiling_cn.md b/doc/howto/optimization/cpu_profiling_cn.md index 14eba0e2f34b115f5cd24920b5b1af07ec953d00..d59be670c2b33b64d9b6f96b53f50e5bf9f0613b 100644 --- a/doc/howto/optimization/cpu_profiling_cn.md +++ b/doc/howto/optimization/cpu_profiling_cn.md @@ -35,7 +35,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.284 0.284 29.514 29.514 main.py:1() - 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) 4696 12.040 0.003 12.040 0.003 {built-in method run} 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() ``` @@ -61,9 +61,9 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) - 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) - 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) - 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() ``` We can see that the most time-consuming function is the C++-side `run` function; tuning it requires the mixed `Python`/`C++` profiling described in the second section. The `sync_with_cpp` function also has a long total runtime, and each call is expensive, so we can click into the details of `sync_with_cpp` to inspect its call relationships. @@ -76,9 +76,9 @@ Called By: Function was called by...
ncalls tottime cumtime -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:487(clone) - 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:534(append_backward) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) Called: diff --git a/doc/howto/optimization/cpu_profiling_en.md b/doc/howto/optimization/cpu_profiling_en.md index 368af40cc7308cf6f4c609361078fe3ba02213ed..01e5fddf61547f9fc86ef18a6f2e2ac508d22dbb 100644 --- a/doc/howto/optimization/cpu_profiling_en.md +++ b/doc/howto/optimization/cpu_profiling_en.md @@ -49,7 +49,7 @@ port, we will see the output like the following: ``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.284 0.284 29.514 29.514 main.py:1() - 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) 4696 12.040 0.003 12.040 0.003 {built-in method run} 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() ``` @@ -74,9 +74,9 @@ focus on. We can sort above profiling file by tottime: ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) - 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) - 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) - 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() ``` We can see that the most time-consuming function is the `built-in @@ -93,9 +93,9 @@ Called By: Function was called by... 
ncalls tottime cumtime -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:487(clone) - 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:534(append_backward) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) Called: diff --git a/doc/howto/read_source.md b/doc/howto/read_source.md index 31987920f32f217ac2db42548874cfe7da57dd72..edf46aff8c6cc9fc01d26c6453b3a8123238ef91 100644 --- a/doc/howto/read_source.md +++ b/doc/howto/read_source.md @@ -1,6 +1,6 @@ # PaddlePaddle Fluid Source Code Overview -Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/tests/book +Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid/tests/book Core: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework @@ -26,16 +26,16 @@ sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) ``` -- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/framework.py#) -- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/layers) +- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/framework.py#) +- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid/layers) - Every Layer has one or more operators and variables/parameters - All the operators are defined at [`paddle/operators/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators). Other worth-looking files: - Base class: [`paddle/framework/operator.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h) - Operator Registration: [`paddle/framework/op_registry.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h) - Operator Lookup: [`paddle/framework/op_info.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_info.h) - Optimizer: `fluid.optimizer.SGD`. It does the following - - Add backward operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/backward.py)] - - Add optimizer operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/optimizer.py)] + - Add backward operators. 
[[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/backward.py)] + - Add optimizer operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)] # Run Time @@ -57,7 +57,7 @@ exe.run(fluid.default_main_program(), - Place: `place`. One of CPU, GPU, or FPGA. [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h) - The device handles are at [paddle/platform/device_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h) -- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] +- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] - Feeds the data: `feed=feeder.feed(data)` - Evaluates all the operators - Fetches the result: `fetch_list=[avg_cost]` diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 0d2691e8115ad6de46dcd4fcd5b7fd79ed60ecb9..88863ab99eb765124bc825b4e9ec9dff890ba3cc 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -58,13 +58,13 @@ static void CreateTensor(Variable* var, proto::VarType::Type var_type) { var->GetMutable(); } else if (var_type == proto::VarType::CHANNEL) { var->GetMutable(); - } else if (var_type == proto::VarType::NCCL_COM) { - // GetMutable will be called in ncclInit + } else if (var_type == proto::VarType::RAW) { + // GetMutable will be called in the operator } else { PADDLE_THROW( "Variable type %d is not in " "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, " - "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, NCCL_COM]", + "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, RAW]", var_type); } } diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 5b43f5a8a4a1c128b04ac206d387e30c55f533fe..53725d3d802c27202a6379cee518991a628cf9a1 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -113,7 +113,10 @@ message VarType { PLACE_LIST = 14; READER = 15; CHANNEL = 16; - NCCL_COM = 17; + // Any runtime-decided variable type is RAW + // RAW variables should manage their own allocations + // in operators like nccl_op + RAW = 17; } required Type type = 1; diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index ee9044b1f5d46dc725c9583d0d90ab5681d2850c..7266f3276477891d3c7b6827316a428ef7a31c6e 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -177,8 +177,8 @@ std::shared_ptr RPCClient::GetChannel(const std::string& ep) { args.SetMaxSendMessageSize(std::numeric_limits::max()); args.SetMaxReceiveMessageSize(std::numeric_limits::max()); - auto ch = std::shared_ptr( - grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args)); + auto ch = + grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args); channels_[ep] = ch; return ch; diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 0994bba782b42be994ae479f4c9c4de5a2e384ed..9185666c56c4621d42429c9cfdb079001c6336f1 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -65,7 +65,7
class NCCLInitOpVarTypeInference : public framework::VarTypeInference { framework::BlockDesc *block) const override { auto out_var_name = op_desc.Output("Communicator").front(); auto &out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarType::NCCL_COM; + auto var_type = framework::proto::VarType::RAW; out_var.SetType(var_type); } }; diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 58850bf566e00f88de19305110e2ef696b73467e..178976f96fdbd08cead7b7c518ea1fbaaa2a5db8 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -121,9 +121,27 @@ This operator will send tensor to recv_op at the parameter server. } }; +class SendOpVarTypeInference : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + auto out_var_name = op_desc.Output("RPCClient").front(); + auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); + auto var_type = framework::proto::VarType::RAW; + out_var.SetType(var_type); + } +}; + +class SendOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(send, ops::SendOp, ops::SendOpMaker); +REGISTER_OPERATOR(send, ops::SendOp, paddle::framework::EmptyGradOpMaker, + ops::SendOpMaker, ops::SendOpVarTypeInference, + ops::SendOpShapeInference); diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index b725be79529c5ccdde12446b5b5c09eaf47550e6..b0a2497d919b65afbe5eeaf4fe47c19baa1aba1c 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -252,7 +252,7 @@ void BindVarDsec(py::module &m) { .value("CHANNEL", proto::VarType::CHANNEL) .value("PLACE_LIST", proto::VarType::PLACE_LIST) .value("READER", proto::VarType::READER) - .value("NCCL_COM", proto::VarType::NCCL_COM); + .value("RAW", proto::VarType::RAW); } void BindOpDesc(py::module &m) { diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index 270f2f4c181df847348be12a199534d47b3577f5..0fea6a80794a64abc2dbf1428d534840febcd450 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -28,10 +28,9 @@ int main(int argc, char** argv) { } #ifdef PADDLE_WITH_CUDA new_argv.push_back( - strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory," - "warpctc_dir")); + strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); #else - new_argv.push_back(strdup("--tryfromenv=use_pinned_memory,warpctc_dir")); + new_argv.push_back(strdup("--tryfromenv=use_pinned_memory")); #endif int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); diff --git a/python/paddle/fluid/.gitignore b/python/paddle/fluid/.gitignore index 2ff540d5764b76cf7bac64fc2bb9df6e9c1b398a..80c1cf3fcb86aa50600e02e4765640a91560916e 100644 --- a/python/paddle/fluid/.gitignore +++ b/python/paddle/fluid/.gitignore @@ -1 +1,2 @@ proto +core.so diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 2fcf3753c5f1211d3b27f38fbdc8d097c437c79a..8da9ca290b22ae69b1fd195d8614c31dc4e13e00 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -226,8 +226,7 @@ class DistributeTranspiler: rpc_client_var = 
program.global_block().create_var( name="RPC_CLIENT_VAR", persistable=True, - dtype='float32', # dtype and shape is not used in fact - shape=[0]) + type=core.VarDesc.VarType.RAW) # create send_op program.global_block().append_op( diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 88c9ae31b7902a1b098700839fb533c013b03103..bd79022a0c39cf18bd05d49ac62986d342a4ae06 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -130,7 +130,7 @@ def generate_layer_fn(op_type): o_name = not_intermediate_outputs[0].name intermediate_output_names = [output.name for output in intermediate_outputs] - def infer_and_check_dtype(op_proto, **kwargs): + def infer_and_check_dtype(op_proto, *args, **kwargs): """ This function performs the sanity check for dtype and instance type. @@ -141,6 +141,10 @@ def generate_layer_fn(op_type): val = kwargs.pop(name, []) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] + if len(val) == 0: + val = [args[0]] + args = args[1:] + for each in val: if not isinstance(each, Variable): raise ValueError("input of {0} must be variable".format( @@ -155,10 +159,10 @@ def generate_layer_fn(op_type): return dtype - def func(**kwargs): + def func(*args, **kwargs): helper = LayerHelper(op_type, **kwargs) - dtype = infer_and_check_dtype(op_proto, **kwargs) + dtype = infer_and_check_dtype(op_proto, *args, **kwargs) inputs = dict() for ipt in op_proto.inputs: @@ -166,6 +170,9 @@ def generate_layer_fn(op_type): val = kwargs.pop(name, []) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] + if len(val) == 0 and len(args) != 0: + val = args[0] + args = args[1:] inputs[ipt.name] = val outputs = dict() diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 97e8f082cffe2b139fb40ddbf9b27f463f47c8ab..8100e8f034fb5d6ca706d1408f448fa26193f282 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -160,8 +160,8 @@ def sums(input, out=None): a0 = layers.array_read(array=tmp, i=i) i = layers.increment(x=i) a1 = layers.array_read(array=tmp, i=i) - mean_a0 = layers.mean(x=a0) - mean_a1 = layers.mean(x=a1) + mean_a0 = layers.mean(a0) + mean_a1 = layers.mean(a1) a_sum = layers.sums(input=[mean_a0, mean_a1]) """ helper = LayerHelper('sum', **locals()) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 61febc4e383b5eb0e75c0005330b92fa90ddbe44..93a19de92e1654df2424019d764f1cbbe6314686 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -36,10 +36,18 @@ class Optimizer(object): """ def __init__(self, learning_rate, global_step=None, regularization=None): - assert learning_rate is not None + if not isinstance(learning_rate, float) and \ + not isinstance(learning_rate, framework.Variable): + raise TypeError("learning rate should be float or Variable") self._global_step = global_step self.regularization = regularization - self._global_learning_rate = learning_rate + self._learning_rate = learning_rate + # each program should have an independent learning rate + # program -> Variable(learning_rate) + self._learning_rate_map = dict() + if isinstance(self._learning_rate, framework.Variable): + self._learning_rate_map[framework.default_main_program( + )] = self._learning_rate # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. @@ -48,26 +56,33 @@ class Optimizer(object): self.helper = None def _create_global_learning_rate(self): - if isinstance(self._global_learning_rate, float): - self._global_learning_rate = layers.create_global_var( - name=unique_name.generate("learning_rate"), - shape=[1], - value=float(self._global_learning_rate), - dtype='float32', - persistable=True) - - if not isinstance(self._global_learning_rate, framework.Variable): - raise ValueError("learning rate should be a Variable, " - "actual type is %s", - type(self._global_learning_rate)) - - @property - def global_learning_rate(self): + lr = self.global_learning_rate() + + if isinstance(lr, framework.Variable): + return + else: + if not isinstance(self._learning_rate, float): + raise TypeError( + "learning rate variable is created outside the optimizer; " + "cannot create a new learning rate variable for a new program") + + # create learning rate in the current main program + self._learning_rate_map[framework.default_main_program( + )] = layers.create_global_var( + name=unique_name.generate("learning_rate"), + shape=[1], + value=float(self._learning_rate), + dtype='float32', + persistable=True) + + def global_learning_rate(self, program=None): """ get global decayed learning rate :return: """ - return self._global_learning_rate + if program is None: + program = framework.default_main_program() + return self._learning_rate_map.get(program, None) def _append_optimize_op(self, block, param_and_grad): """ append optimize operator to block and return all the added optimize_op @@ -78,7 +93,7 @@ class Optimizer(object): # create learning rate variable for every parameter param = param_and_grad[0] param_lr = param.optimize_attr['learning_rate'] - return self._global_learning_rate * param_lr + return self.global_learning_rate() * param_lr def _create_accumulators(self, block, parameters): """Create all accumulators needed by the parameters diff --git a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py index adf38ea28995be137efe853d0842142cbeb282e7..1ed58c3d3d170a938cef813692d7841227964b16 100644 --- a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -147,7 +147,7 @@ def seq_to_seq_net(): label = fluid.layers.data( name='label_sequence', shape=[1], dtype='int64', lod_level=1) cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, prediction diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 0d6fb872d70e339dbd0d86aa3406740ca5e4ef2c..8ceee52ff9c425a6cc5479acb9c5b8f0928fc991 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -29,7 +29,7 @@ def train(use_cuda, save_dirname): y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index
2f6c7d1a70d2c9550f3af1babb7fe9ac744fb679..615e23529a9ac613d5e37ae68175cc09ad73b43f 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -110,7 +110,7 @@ def train(net_type, use_cuda, save_dirname): predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) # Test program diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index fcc9dbf8bbf15cc8756127046ebe56ab9246e7bc..336e6ed2a32aeefe07a17055ae29d6b82eb5041e 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -164,7 +164,7 @@ def train(use_cuda, save_dirname=None): label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index 287ef7a752474e2a865b71c140d5f66977c9aa82..bd768d5f08de57005f76ea3ea25b318a930a58c7 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -178,7 +178,7 @@ def train_main(use_cuda, is_sparse): label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) - avg_cost = pd.mean(x=cost) + avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index 2a778b04538582d4bff20a97221bc2daa81452f8..12307111d5dda549bff7ea40ac7c341c69c3e4bd 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -48,7 +48,7 @@ BATCH_SIZE = 64 def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(x=loss) + avg_loss = fluid.layers.mean(loss) acc = fluid.layers.accuracy(input=prediction, label=label) return prediction, avg_loss, acc @@ -101,8 +101,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): avg_loss, acc = pd() # get mean loss and acc through every devices. 
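The many `fluid.layers.mean(x=...)` → `fluid.layers.mean(...)` edits running through these test files are all enabled by the `*args` fallback added to `generate_layer_fn` in `python/paddle/fluid/layers/layer_function_generator.py` earlier in this diff: generated layer functions now bind leftover positional arguments to op inputs whose keyword was omitted. A minimal sketch of the resulting call style, assuming the `paddle.fluid` API at this revision; the `cost` variable is illustrative only:

```python
import paddle.fluid as fluid  # new import path introduced by this change

# An illustrative input; any Variable produced by a layer works here.
cost = fluid.layers.data(name='cost', shape=[1], dtype='float32')

# Old call style, removed throughout this diff:
#   avg_cost = fluid.layers.mean(x=cost)
# New call style: the first positional argument is consumed by the
# generated layer's *args fallback and bound to the op's first input.
avg_cost = fluid.layers.mean(cost)
```

The fallback consumes positional arguments in the order of the op proto's declared inputs, as the `layer_function_generator.py` hunk above shows.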
- avg_loss = fluid.layers.mean(x=avg_loss) - acc = fluid.layers.mean(x=acc) + avg_loss = fluid.layers.mean(avg_loss) + acc = fluid.layers.mean(acc) else: prediction, avg_loss, acc = net_conf(img, label) diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 81318823e5c329faa140dbaa8d5b23eb6dc0f7ca..c190107e02e044635ff0b47c61de41c8bfed5acc 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -147,7 +147,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return scale_infer, avg_cost diff --git a/python/paddle/fluid/tests/book/test_understand_sentiment.py b/python/paddle/fluid/tests/book/test_understand_sentiment.py index 8bb4bf92de9a6e6e6160c5eca675bc154d254d51..ab8df93651c01f75eeda1eab1ac95db867678106 100644 --- a/python/paddle/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/test_understand_sentiment.py @@ -42,7 +42,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -82,7 +82,7 @@ def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32, last = fluid.layers.sequence_last_step(rnn()) prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -119,7 +119,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -158,8 +158,8 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): pd.write_output(acc) cost, acc = pd() - cost = fluid.layers.mean(x=cost) - acc_out = fluid.layers.mean(x=acc) + cost = fluid.layers.mean(cost) + acc_out = fluid.layers.mean(acc) prediction = None assert save_dirname is None diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 965f1ff851abbf1dbbda4de1831f9208a8dd0d3c..f33a759240f21f52817c482a2ebe008155dbd97b 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -118,7 +118,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, predict_word word_dict = paddle.dataset.imikolov.build_dict() @@ -143,7 +143,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): ])) pd.write_output(avg_cost) - avg_cost = fluid.layers.mean(x=pd()) + avg_cost = fluid.layers.mean(pd()) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) 
sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py index b5fbffea368aa78f33bd1d1f85de7c2e830f2d44..01c1fa24fd4ae449af1f66f40a1c641f718c0e58 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -24,7 +24,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py index 9f807df8829db59833cafb9723371abc240da62d..e9101fd763c4fd466e1e9287a24487516dc250f6 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -114,7 +114,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 533cdb5a9ba1546ba8d922b7651cd09d0c1a363c..2d0c54fa7c37f42241702769d01c546fcb6efd8e 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -154,7 +154,7 @@ def main(): label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py index bee022bf4e262f027871849a4e47cc04e91b8025..6304927364e2fa9cf570adf8d1cee81b78604cec 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -65,7 +65,7 @@ concat_embed = fluid.layers.concat( hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( diff --git a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py index 243d7f5073b5e21f6203907626e382d3daca0949..f5ef08430e0d3ec340f1859d0ec4343e03099a6f 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py +++ 
b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py @@ -94,7 +94,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index 223428f820c278d6b76f1339bd83c6e55e112ed9..eae1fe62af431a3be677c02460f942efa3c8dde2 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -37,7 +37,7 @@ conv_pool_2 = fluid.nets.simple_img_conv_pool( predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.01) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py index 9fb356e78d24931b2b9aef7a72bef4b4982eba94..dad95c0f3fff7fb5f3521e561617a2accad86823 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py @@ -32,7 +32,7 @@ predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py index ea05fbb457fa4250590778988d05c5ee7b0124be..4329c821c27998d47e735ff0214775a078b9ec8e 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py @@ -117,7 +117,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return avg_cost diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index 3b057c5d96204989055003e1520b7f35ce8d89c7..ee0d8597b75e6326b60276668a85cd8422327e48 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -38,7 +38,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = 
fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py index b6c85ebb40628ee2bb6d2ecbda8966045597092f..fa792cbf92906fe4e922ab9f9fac0245a56b414d 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -49,7 +49,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index 64f55e08531dd47631fbd946de25436805ac9fdd..57202cea1aa277631ea41a0f35ad0e308b961e3e 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -117,7 +117,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) opts = optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index c439401ea92defd3fe2fc5ff44f7e2c8d7a2ae74..689a75afc7ccdf84142f5531a438e1f9af7af4ca 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -100,7 +100,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/demo/fc_gan.py b/python/paddle/fluid/tests/demo/fc_gan.py index fd9d80fa94467c70d28e4c71f34ebf0f964e5145..7452ea2a34aa0c75d8e0990639b29705033af98b 100644 --- a/python/paddle/fluid/tests/demo/fc_gan.py +++ b/python/paddle/fluid/tests/demo/fc_gan.py @@ -96,7 +96,7 @@ def main(): x=D(img), label=fluid.layers.data( name='label', shape=[1], dtype='float32')) - d_loss = fluid.layers.mean(x=d_loss) + d_loss = fluid.layers.mean(d_loss) with fluid.program_guard(dg_program, startup_program): noise = fluid.layers.data( @@ -107,7 +107,7 @@ def main(): x=D(g_img), label=fluid.layers.fill_constant_batch_size_like( input=noise, dtype='float32', shape=[-1, 1], value=1.0)) - dg_loss = fluid.layers.mean(x=dg_loss) + dg_loss = fluid.layers.mean(dg_loss) opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) diff --git 
a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index f4c0dfa7a7f2417c3e2406f29653100d907c8606..b2fd5ae29c724da52df0a5d3cb56d2ec9e5530f3 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -33,7 +33,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() prog_clip.block(0).var(hidden1.name).set_error_clip( diff --git a/python/paddle/fluid/tests/test_gradient_clip.py b/python/paddle/fluid/tests/test_gradient_clip.py index ca0b129008b29a208567f5a862b12f307b3eae43..68b682f68b1fd147b821cfdb1e0866cf8aa04bff 100644 --- a/python/paddle/fluid/tests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/test_gradient_clip.py @@ -30,7 +30,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() diff --git a/python/paddle/fluid/tests/test_mnist_if_else_op.py b/python/paddle/fluid/tests/test_mnist_if_else_op.py index 0b557942b0893e5af203d869ed8d235bbb85fba8..94395f6cfb4648967558ed265e798e3505c20fc1 100644 --- a/python/paddle/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/fluid/tests/test_mnist_if_else_op.py @@ -56,7 +56,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = layers.merge_lod_tensor( in_true=true_out, in_false=false_out, mask=cond, x=image) loss = layers.cross_entropy(input=prob, label=label) - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) @@ -113,7 +113,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = ie() loss = layers.cross_entropy(input=prob[0], label=label) - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index 380a8549b3ac3e34d3d2a41c829e6dca529fcbdf..a49e9035a43e04fc1d1b2328d7562c053320b24b 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -49,15 +49,15 @@ class TestArrayReadWrite(unittest.TestCase): i = layers.increment(x=i) a2 = layers.array_read(array=arr, i=i) - mean_a0 = layers.mean(x=a0) - mean_a1 = layers.mean(x=a1) - mean_a2 = layers.mean(x=a2) + mean_a0 = layers.mean(a0) + mean_a1 = layers.mean(a1) + mean_a2 = layers.mean(a2) a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2]) - mean_x0 = layers.mean(x=x[0]) - mean_x1 = layers.mean(x=x[1]) - mean_x2 = layers.mean(x=x[2]) + mean_x0 = layers.mean(x[0]) + mean_x1 = layers.mean(x[1]) + mean_x2 = layers.mean(x[2]) x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2]) diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 15731fefc824b7e71a83d7527d022bdb92e8ed9b..06e676cd83e77549afd679e730426c590cc046bf 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ 
b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -26,7 +26,7 @@ class TestCalcGradient(unittest.TestCase): x = layers.create_parameter(dtype="float32", shape=[5, 10]) y = layers.create_parameter(dtype="float32", shape=[10, 8]) mul_out = layers.mul(x=x, y=y) - mean_out = layers.mean(x=mul_out) + mean_out = layers.mean(mul_out) a = calc_gradient(mean_out, mul_out) b = calc_gradient(mean_out, x) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index fad97a28164bbb9c7e6a94040c644ba8fc790c0f..084b8d37386fac0366c190f5f30dd39467072498 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -39,7 +39,7 @@ class ConditionalBlock(unittest.TestCase): outs = exe.run(feed={'X': x}, fetch_list=[out])[0] print outs - loss = layers.mean(x=out) + loss = layers.mean(out) append_backward(loss=loss) outs = exe.run( feed={'X': x}, diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index 0a3812a68d60c6bc44b1c275526dc0ac5c9e3b1c..df7ab0d29bdfc9410cd7dd4a8f2a7cd440ef4aba 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -81,7 +81,7 @@ class TestDynRNN(unittest.TestCase): logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) - loss = fluid.layers.mean(x=loss) + loss = fluid.layers.mean(loss) sgd = fluid.optimizer.SGD(1e-4) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() @@ -119,7 +119,7 @@ class TestDynRNN(unittest.TestCase): label = fluid.layers.data(name='label', shape=[1], dtype='float32') loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) - loss = fluid.layers.mean(x=loss) + loss = fluid.layers.mean(loss) sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index f3dcbe3386a7241449ef7d7aa7982557ddb47823..22329390754d8d010dced0d1aca35617140cd097 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -272,7 +272,7 @@ class TestSimpleMul(SeedFixedTestCase): out = rnn() out = fluid.layers.sequence_pool(out, pool_type='last') - loss = fluid.layers.mean(x=out) + loss = fluid.layers.mean(out) fluid.backward.append_backward(loss) cpu = fluid.CPUPlace() @@ -348,7 +348,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): out = rnn() last = fluid.layers.sequence_pool(input=out, pool_type='last') - loss = fluid.layers.mean(x=last) + loss = fluid.layers.mean(last) fluid.backward.append_backward(loss) cpu = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index c28177a1ae513bb84a220a7b945cbf619c2d8e83..b03a70f1b9e61162d37541ffeba8510fc11c605a 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -125,7 +125,7 @@ class TestDyRnnStaticInput(unittest.TestCase): return static_input_step_outs last = fluid.layers.sequence_pool(input=rnn(), pool_type='last') - loss = fluid.layers.mean(x=last) + loss = 
fluid.layers.mean(last) append_backward(loss) static_input_grad = self._program.global_block().var( framework.grad_var_name('static_input_tensor')) diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index 238ca7188b80f53fd3f84801e4fe6ac6ad4d0f97..51460cbb1370f6794e13d18fe099865b4713691f 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -38,7 +38,7 @@ class TestBook(unittest.TestCase): y_predict = layers.fc(input=x, size=1, act=None) cost = layers.square_error_cost(input=y_predict, label=y) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) sgd_optimizer.minimize(avg_cost, init_program) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index bb673d3b541e6589e8979ced0b4aa25075b22a17..6944cca394fbc1ddde09dfeb0bc82e357a3cd225 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -30,7 +30,7 @@ class TestBook(unittest.TestCase): y_predict = layers.fc(input=x, size=1, act=None) y = layers.data(name='y', shape=[1], dtype='float32') cost = layers.square_error_cost(input=y_predict, label=y) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) program.append_backward(avg_cost) @@ -49,7 +49,7 @@ class TestBook(unittest.TestCase): act='softmax', param_attr=["sftmax.w1", "sftmax.w2"]) cost = layers.cross_entropy(input=predict, label=label) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) @@ -92,7 +92,7 @@ class TestBook(unittest.TestCase): predict = layers.fc(input=conv_pool_2, size=10, act="softmax") cost = layers.cross_entropy(input=predict, label=label) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) program.append_backward(avg_cost) @@ -140,7 +140,7 @@ class TestBook(unittest.TestCase): size=dict_size, act='softmax') cost = layers.cross_entropy(input=predict_word, label=next_word) - avg_cost = layers.mean(x=cost) + avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) @@ -287,7 +287,7 @@ class TestBook(unittest.TestCase): num_total_classes=dict_size, param_attr='nce.w', bias_attr='nce.b') - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) self.assertIsNotNone(avg_loss) print(str(default_main_program())) diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index de1a3d101d8343bf25be5fd6e8b7791d06b4ab02..66a03640c148d769787593f41a44cd4d1aaa10b1 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -182,7 +182,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): array = layers.lod_tensor_to_array(x, table) result = layers.array_to_lod_tensor(array, table) - mean = layers.mean(x=result) + mean = layers.mean(result) append_backward(mean) diff --git a/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py index e57804d0df840331c9f53e4ff1a03f062c3fe93e..f3dcca6b0107a9c4a6efcb0c0fd50324aaf92648 100644 --- 
+++ b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py
@@ -29,7 +29,7 @@ class TestControlFlowGraph(unittest.TestCase):
             y_predict = layers.fc(input=x, size=1, act=None)
             y = layers.data(name='y', shape=[1], dtype='float32')
             cost = layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = layers.mean(x=cost)
+            avg_cost = layers.mean(cost)
             opt = optimizer.SGD(learning_rate=0.001)
             opt = opt.minimize(avg_cost)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py
index edaae977dfbe9a98ed7c78f8cf990082a043a8a9..cd20b430f93498372dd706a46c3a6d9d798721f5 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_op.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py
@@ -127,7 +127,7 @@ class BaseParallelForTest(unittest.TestCase):
                 data = next(generator)
                 loss = generator.send(data)
                 self.assertIsNotNone(loss)
-                avg_loss = fluid.layers.mean(x=loss)
+                avg_loss = fluid.layers.mean(loss)
                 fluid.backward.append_backward(loss=avg_loss)
 
             exe = fluid.Executor(place)
@@ -170,7 +170,7 @@ class ParallelOpTest(BaseParallelForTest):
         x = fluid.layers.data(shape=[784], dtype='float32', name='img')
        x = yield x
         hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-        loss = fluid.layers.mean(x=hidden)
+        loss = fluid.layers.mean(hidden)
         yield loss
 
     def test_simple_fc(self):
@@ -200,7 +200,7 @@ class ParallelOpTestMultipleInput(BaseParallelForTest):
         hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
         hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')
         hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')
-        loss = fluid.layers.mean(x=hidden3)
+        loss = fluid.layers.mean(hidden3)
         yield loss
 
     def test_simple_fc(self):
diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
index 5c08bf4fc3f14599d71bd31767a34c0b5b7621f2..c75080fbb96d472810e5d6a1d02a77c456006f66 100644
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -35,7 +35,7 @@ class TestPrintOpCPU(unittest.TestCase):
         x.stop_gradient = False
         printed = layers.Print(input=x, **kargs)
         if only_forward: return printed
-        loss = layers.mean(x=printed)
+        loss = layers.mean(printed)
         append_backward(loss=loss)
         return loss
diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py
index 09f23894c4c928cb3e07f1c0a42a8917091950b6..d9444b50a2362d4d122ea880d47d337426fbdc96 100644
--- a/python/paddle/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/fluid/tests/unittests/test_profiler.py
@@ -54,7 +54,7 @@ class TestProfiler(unittest.TestCase):
             predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
             label = fluid.layers.data(name='y', shape=[1], dtype='int64')
             cost = fluid.layers.cross_entropy(input=predict, label=label)
-            avg_cost = fluid.layers.mean(x=cost)
+            avg_cost = fluid.layers.mean(cost)
             accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
 
             optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
index d2d13386ac1545605b807486d14c0925d890df5e..d6ff18430e319e236f03d5661381e923cc956590 100644
--- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py
+++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
@@ -127,7 +127,7 @@ class RecurrentOpTest1(unittest.TestCase):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)
 
-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)
 
     def create_rnn_op(self):
         x = layers.data(
@@ -261,7 +261,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)
 
-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)
 
     def create_rnn_op(self):
         x = layers.data(
@@ -360,7 +360,7 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
         self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3(
             self.input_shape, self.output_shape)
 
-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)
 
     def create_rnn_op(self):
         x = layers.data(
@@ -444,7 +444,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape,
                                                             self.output_shape)
-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)
         print self.main_program
 
     def create_rnn_op(self):
diff --git a/python/paddle/fluid/tests/unittests/test_registry.py b/python/paddle/fluid/tests/unittests/test_registry.py
index d04192cb3a380b2f71b8a920479545167251387f..a361c4624e3e2efa817e8137ff31133997a0a1fb 100644
--- a/python/paddle/fluid/tests/unittests/test_registry.py
+++ b/python/paddle/fluid/tests/unittests/test_registry.py
@@ -22,7 +22,7 @@ class TestRegistry(unittest.TestCase):
     @decorators.prog_scope()
     def test_registry_layer(self):
         x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
-        output = fluid.layers.mean(x=x)
+        output = fluid.layers.mean(x)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
index ba34e6a486e780a703b32b66c3b2caf7676cd27e..1d93230e7b74c5b6c00bbe125e3ae2d3a649b4b9 100644
--- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
+++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
@@ -39,7 +39,7 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
         i = layers.increment(x=i)
         i.stop_gradient = True
         self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
-        mem3_mean = layers.mean(x=self.mem3)
+        mem3_mean = layers.mean(self.mem3)
         append_backward(loss=mem3_mean)
         self.x_grad = self.main_program.global_block().var('x@GRAD')
diff --git a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
index 0b04ff302dc82a150a223b2f6632d77f95c61dc7..02cc7da84918041c33bf5c8def46025bc87a2b9e 100644
--- a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
+++ b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
@@ -145,7 +145,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
             input=x, mask=y, level=level)
         out = layers.merge_lod_tensor(
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
-        mean = layers.mean(x=out)
+        mean = layers.mean(out)
 
         append_backward(mean)
diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py
index 5afeb5ae8983df5c2c426db79a007173e8e470e8..fe8808bc044684c96fb3382836be32dac1d241f3 100644
--- a/python/paddle/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_op.py
@@ -58,7 +58,7 @@ class TestWhileOp(unittest.TestCase):
             layers.less_than(x=i, y=array_len, cond=cond)
 
         sum_result = layers.array_read(array=mem_array, i=i)
-        loss = layers.mean(x=sum_result)
+        loss = layers.mean(sum_result)
         append_backward(loss)
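
Taken together, the hunks above apply one mechanical change across the unit tests: `fluid.layers.mean` now takes its input as the first positional argument rather than the keyword argument `x=`. Below is a minimal sketch of the new call pattern, mirroring the small regression network used in several of these tests; it assumes a build of this branch where the renamed package imports as `paddle.fluid`.

```python
import paddle.fluid as fluid

# Build a small regression network, as in test_inference_model_io.py.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)

# Before this change: avg_cost = fluid.layers.mean(x=cost)
# After this change, the input is positional:
avg_cost = fluid.layers.mean(cost)
```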