diff --git a/.scripts/update_readme_paddle_version.py b/.scripts/update_readme_paddle_version.py new file mode 100644 index 0000000000000000000000000000000000000000..56d56914c65956a2bb753bc58269d59034766b1c --- /dev/null +++ b/.scripts/update_readme_paddle_version.py @@ -0,0 +1,53 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + + +def update(fname, ver): + with open(fname, 'r') as f: + lines = f.readlines() + + for i, line in enumerate(lines): + if 'paddlepaddle>=' in line: + lines[i] = re.sub("paddlepaddle>=[\d+\.]+", + "paddlepaddle>={}".format(ver), line) + + with open(fname, 'w') as f: + for line in lines: + f.write(line) + + +if __name__ == '__main__': + new_version = '1.6.1' + + readme_files = ['../README.md', '../README.cn.md'] + + exclude_examples = [ + 'NeurIPS2019-Learn-to-Move-Challenge', + 'NeurIPS2018-AI-for-Prosthetics-Challenge', 'EagerMode' + ] + for example in os.listdir('../examples/'): + if example not in exclude_examples: + readme_files.append( + os.path.join('../examples', example, 'README.md')) + + for example in os.listdir('../examples/EagerMode/'): + readme_files.append( + os.path.join('../examples/EagerMode', example, 'README.md')) + + print(readme_files) + for fname in readme_files: + update(fname, new_version) diff --git a/README.cn.md b/README.cn.md index dd154a4e634374d5765727e86607e5c178719056..8fc30c5f22bbd0be8751a95f8cf258f5c08fd6de 100644 --- 
a/README.cn.md +++ b/README.cn.md @@ -62,7 +62,7 @@ ans = agent.sum(1,5) # run remotely and not comsume any local computation resour # 安装: ### 依赖 - Python 2.7 or 3.5+. -- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) >=1.2.1 (**非必须的**,如果你只用并行部分的接口不需要安装paddle) +- [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) (**非必须的**,如果你只用并行部分的接口不需要安装paddle) ``` diff --git a/README.md b/README.md index 7bec38c8a253b4cc53fb4cc0ac0dee3221181b05..93590cfd8af98f10552ade36e9c66c37bc626bc3 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ For users, they can write code in a simple way, just like writing multi-thread c # Install: ### Dependencies - Python 2.7 or 3.5+. -- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) >=1.5.1 (**Optional**, if you only want to use APIs related to parallelization alone) +- [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) (**Optional**, if you only want to use APIs related to parallelization alone) ``` diff --git a/examples/A2C/README.md b/examples/A2C/README.md index bb69f56c52714f0891977d48fb4f2ea84892ad69..d38a5d153b3ab39c59b851775567d90edcdda4fb 100755 --- a/examples/A2C/README.md +++ b/examples/A2C/README.md @@ -19,7 +19,7 @@ Performance of A2C on various envrionments ## How to use ### Dependencies -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym==0.12.1 + atari-py==0.1.7 diff --git a/examples/DDPG/README.md b/examples/DDPG/README.md index d6274c75403849fef9f4c1828ac200646cb305b6..d7794617a2b93cc98f4ca760d6960ea10defb0cc 100644 --- a/examples/DDPG/README.md +++ b/examples/DDPG/README.md @@ -15,7 +15,7 @@ Please see [here](https://github.com/openai/mujoco-py) to know more about Mujoco ## How to use ### Dependencies: + python3.5+ -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + 
[parl](https://github.com/PaddlePaddle/PARL) + gym + tqdm diff --git a/examples/DQN/README.md b/examples/DQN/README.md index 4e2fe5d2a893b50a7110ca1a56b1b6b1c7917109..351e44754ad82125eec4e1346fd6301e8c1555b7 100644 --- a/examples/DQN/README.md +++ b/examples/DQN/README.md @@ -21,7 +21,7 @@ Performance of DQN on various environments ## How to use ### Dependencies: -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym + tqdm diff --git a/examples/ES/README.md b/examples/ES/README.md index e1d44d24d949eca8d08b7adfd4e8cc81337d2dbc..207ae2dafa68c5f7d2eb30f956355b07c1bd5d61 100644 --- a/examples/ES/README.md +++ b/examples/ES/README.md @@ -12,7 +12,7 @@ Please see [here](https://github.com/openai/mujoco-py) to know more about Mujoco ## How to use ### Dependencies -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym==0.9.4 + mujoco-py==0.5.1 diff --git a/examples/EagerMode/QuickStart/README.md b/examples/EagerMode/QuickStart/README.md index b4493ca3920fbf0db53d3fab8104c627e8fbc2cd..fa7612fe5b07b5d07962fa53ff04d39a18bee945 100644 --- a/examples/EagerMode/QuickStart/README.md +++ b/examples/EagerMode/QuickStart/README.md @@ -4,7 +4,7 @@ Train an agent with PARL to solve the CartPole problem, a classical benchmark in ## How to use ### Dependencies: -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym diff --git a/examples/GA3C/README.md b/examples/GA3C/README.md index 5b892fdb6c6108f1dd12b923d5057ddfeaadae3e..220e75fb5340020d5dcc63ec46b9ad3676453711 100755 --- a/examples/GA3C/README.md +++ b/examples/GA3C/README.md @@ -16,7 +16,7 @@ Results with one learner (in a P40 GPU) and 24 simulators (in 
12 CPU) in 10 mill ## How to use ### Dependencies -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym==0.12.1 + atari-py==0.1.7 diff --git a/examples/IMPALA/README.md b/examples/IMPALA/README.md index cd361daf894ea0cfaa5e056204cc938076ff4541..6d96331aa049fc21eae4f288c47eaba6ab48cec6 100755 --- a/examples/IMPALA/README.md +++ b/examples/IMPALA/README.md @@ -20,7 +20,7 @@ Result with one learner (in a P40 GPU) and 32 actors (in 32 CPUs). ## How to use ### Dependencies -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym==0.12.1 + atari-py==0.1.7 diff --git a/examples/MADDPG/README.md b/examples/MADDPG/README.md index d37b553de416fe9176119f654508774d8bb1278c..55d191474b62f5099d91da51bd80443abd6b87d8 100644 --- a/examples/MADDPG/README.md +++ b/examples/MADDPG/README.md @@ -95,7 +95,7 @@ simple_world_comm
## How to use ### Dependencies: + python3.5+ -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + [multiagent-particle-envs](https://github.com/openai/multiagent-particle-envs) + gym diff --git a/examples/NeurIPS2018-AI-for-Prosthetics-Challenge/README.md b/examples/NeurIPS2018-AI-for-Prosthetics-Challenge/README.md index dc5c8aa54fcd1c4a5caf7af1b3a27c20efd92b20..d889335168aa902ae7ba86317ea85c48ede29e32 100644 --- a/examples/NeurIPS2018-AI-for-Prosthetics-Challenge/README.md +++ b/examples/NeurIPS2018-AI-for-Prosthetics-Challenge/README.md @@ -19,6 +19,7 @@ For more technical details about our solution, we provide: ## Dependencies - python3.6 +- [parl==1.0](https://github.com/PaddlePaddle/PARL) - [paddlepaddle==1.5.1](https://github.com/PaddlePaddle/Paddle) - [osim-rl](https://github.com/stanfordnmbl/osim-rl) - [grpcio==1.12.1](https://grpc.io/docs/quickstart/python.html) diff --git a/examples/NeurIPS2019-Learn-to-Move-Challenge/README.md b/examples/NeurIPS2019-Learn-to-Move-Challenge/README.md index e407c2f0c8f49d7e11ea5a74c1c4ec31eabcb30e..d67fd52f97fef04769501e24a0a802384fe124db 100644 --- a/examples/NeurIPS2019-Learn-to-Move-Challenge/README.md +++ b/examples/NeurIPS2019-Learn-to-Move-Challenge/README.md @@ -6,6 +6,6 @@ The **PARL** team gets the first place in NeurIPS reinforcement learning competi ## Dependencies - python3.6 +- [parl==1.2.1](https://github.com/PaddlePaddle/PARL) - [paddlepaddle==1.5.1](https://github.com/PaddlePaddle/Paddle) -- [parl>=1.2.1](https://github.com/PaddlePaddle/PARL) - [osim-rl==3.0.11](https://github.com/stanfordnmbl/osim-rl) diff --git a/examples/PPO/README.md b/examples/PPO/README.md index cd1bc133031a77242760c57a0a3f8a779be422d8..9a87c1192bbee1a1d30ec434c9f3ddb6b79f726c 100644 --- a/examples/PPO/README.md +++ b/examples/PPO/README.md @@ -18,7 +18,7 @@ Please see 
[here](https://github.com/openai/mujoco-py) to know more about Mujoco ## How to use ### Dependencies: + python3.5+ -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym + tqdm diff --git a/examples/QuickStart/README.md b/examples/QuickStart/README.md index 05cbba67a711665ca08f512674ebd981c353199b..cf11088f7a8d8053f5ae8bc6b5833a148b3c5c89 100644 --- a/examples/QuickStart/README.md +++ b/examples/QuickStart/README.md @@ -4,7 +4,7 @@ Train an agent with PARL to solve the CartPole problem, a classical benchmark in ## How to use ### Dependencies: -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym diff --git a/examples/SAC/README.md b/examples/SAC/README.md index c9a02209d663556900842317aa5f1ab987e14af3..05854e54a339cf44e1157051a575907bf714062c 100644 --- a/examples/SAC/README.md +++ b/examples/SAC/README.md @@ -18,7 +18,7 @@ Please see [here](https://github.com/openai/mujoco-py) to know more about Mujoco ## How to use ### Dependencies: + python3.5+ -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym + mujoco-py>=1.50.1.0 diff --git a/examples/TD3/README.md b/examples/TD3/README.md index 941ce570d115c5a95ed11c105b27d634440ce5a7..a1aee0cefa4cd1b42dcaf742295ba6f423710b1f 100644 --- a/examples/TD3/README.md +++ b/examples/TD3/README.md @@ -19,7 +19,7 @@ Please see [here](https://github.com/openai/mujoco-py) to know more about Mujoco ## How to use ### Dependencies: + python3.5+ -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym + mujoco-py>=1.50.1.0 diff --git 
a/examples/offline-Q-learning/README.md b/examples/offline-Q-learning/README.md index f442f5d9422c753425d05144accf4b0602e78daf..ea63f1fed50c871ca9e4e1d1a7d092e382f32a74 100644 --- a/examples/offline-Q-learning/README.md +++ b/examples/offline-Q-learning/README.md @@ -21,7 +21,7 @@ learn_program = parl.compile(learn_program, loss=training_loss) We provide a demonstration of offline Q-learning with parallel executing, in which we seperate the procedures of collecting data and training the model. First we collect data by interacting with the environment and save them to a replay memory file, and then fit and evaluate the Q network with the collected data. Repeat these two steps to improve the performance gradually. ### Dependencies: -+ [paddlepaddle>=1.5.1](https://github.com/PaddlePaddle/Paddle) ++ [paddlepaddle>=1.6.1](https://github.com/PaddlePaddle/Paddle) + [parl](https://github.com/PaddlePaddle/PARL) + gym + tqdm