diff --git a/.copyright.hook b/.copyright.hook new file mode 100644 index 0000000000000000000000000000000000000000..2446e27248125134ab624ed557823993c90fafc5 --- /dev/null +++ b/.copyright.hook @@ -0,0 +1,106 @@ +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import argparse +import io, re +import sys, os +import subprocess +import platform + +COPYRIGHT = ''' + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +''' + +LANG_COMMENT_MARK = None + +NEW_LINE_MARK = None + +COPYRIGHT_HEADER = None + +if platform.system() == "Windows": + NEW_LINE_MARK = "\r\n" +else: + NEW_LINE_MARK = '\n' + COPYRIGHT_HEADER = COPYRIGHT.split(NEW_LINE_MARK)[1] + p = re.search('(\d{4})', COPYRIGHT_HEADER).group(0) + process = subprocess.Popen(["date", "+%Y"], stdout=subprocess.PIPE) + date, err = process.communicate() + date = date.decode("utf-8").rstrip("\n") + COPYRIGHT_HEADER = COPYRIGHT_HEADER.replace(p, date) + + +def generate_copyright(template, lang='C'): + if lang == 'Python': + LANG_COMMENT_MARK = '#' + else: + LANG_COMMENT_MARK = "//" + + lines = template.split(NEW_LINE_MARK) + ans = LANG_COMMENT_MARK + " " + COPYRIGHT_HEADER + NEW_LINE_MARK + for lino, line in enumerate(lines): + if lino == 0 or lino == 1 or lino == len(lines) - 1: continue + ans += LANG_COMMENT_MARK + " " + line + NEW_LINE_MARK + + return ans + "\n" + + +def lang_type(filename): + if filename.endswith(".py"): + return "Python" + elif filename.endswith(".h"): + return "C" + elif filename.endswith(".hpp"): + return "C" + elif filename.endswith(".cc"): + return "C" + elif filename.endswith(".cpp"): + return "C" + elif filename.endswith(".cu"): + return "C" + elif filename.endswith(".cuh"): + return "C" + elif filename.endswith(".go"): + return "C" + elif filename.endswith(".proto"): + return "C" + else: + print("Unsupported filetype") + exit(0) + + +def main(argv=None): + parser = argparse.ArgumentParser( + description='Checker for copyright declaration.') + parser.add_argument('filenames', nargs='*', help='Filenames to check') + args = parser.parse_args(argv) + + retv = 0 + for filename in args.filenames: + first_line = io.open(filename).readline() + if "COPYRIGHT" in first_line.upper() : continue + original_contents = io.open(filename).read() + new_contents = generate_copyright( + COPYRIGHT, lang_type(filename)) + original_contents + print('Auto Insert Copyright Header {}'.format(filename)) + retv = 1 + with io.open(filename, 'w') as output_file: + output_file.write(new_contents) + + return retv + + +if __name__ == '__main__': + exit(main()) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 59661c9c1da53a2ddac0127ed1827fedde811a1d..89c620bb2f7ef634fa80b64eec7037e8cb9a190c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,3 +31,11 @@ - id: go-fmt types: - go +- repo: local + hooks: + - id: copyright_checker + name: copyright_checker + entry: python ./.copyright.hook + language: system + 
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py)$ + exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..54131b48eca463aef817a4b96ba1b64de4b60aab --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at paddle-dev@baidu.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/CODE_OF_CONDUCT_cn.md b/CODE_OF_CONDUCT_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..c3a22f29cce443de1865f30e67663ba5a60a3040 --- /dev/null +++ b/CODE_OF_CONDUCT_cn.md @@ -0,0 +1,50 @@ +# 貢獻者公約 + +## 我們的承諾 + +為了促進一個開放透明且受歡迎的環境,我們作為貢獻者和維護者保證,無論年齡、種族、民族、性別認同和表達、體型、殘疾、經驗水平、國籍、個人表現、宗教或性別取向,在我們的專案以及社群的參與者都有不被騷擾的體驗。 + +## 我們的準則 + +舉例來說有助於創造正面環境的行為包括: +* 使用歡迎和包容性語言 +* 尊重不同的觀點和經驗 +* 優雅地接受建設性批評 +* 關注在對於社群最好的事情上 +* 對其他社群成員的表現友善 + +舉例來說身為參與者不能接受的行為包括: +* 使用與性有關的言語或是圖像,以及不受歡迎的性騷擾 +* 酸民/反串/釣魚行為或進行侮辱/貶損的評論,人身攻擊及政治攻擊 +* 公開或私下的騷擾 +* 未經許可地發布他人的個人資料,例如住址或是電子地址 +* 其他可以被合理地認定為不恰當或者違反職業操守的行為 + +## 我們的責任 + +專案維護者有責任為"可接受的行為"準則做出詮釋,以及對已發生的不被接受的行為採取恰當且公平的糾正措施。 + +專案維護者有權力及責任去刪除、編輯、拒絕與本行為準則有所違背的評論(comments)、提交(commits)、程式碼、wiki 編輯、問題(issues)和其他貢獻,以及專案維護者可暫時或永久性的禁止任何他們認為有不適當、威脅、冒犯、有害行為的貢獻者。 + +## 使用範圍 + +當一個人代表該專案或是其社群時,本行為準則適用於其專案平台和公共平台。 + +代表專案或是社群的情況,舉例來說包括使用官方專案的電子郵件地址、通過官方的社群媒體帳號發布或線上或線下事件中擔任指定代表。 + +該專案的呈現方式可由其專案維護者進行進一步的定義及解釋。 + +## 強制執行 + +可以透過paddle-dev@baidu.com,來聯繫專案團隊來報告濫用、騷擾或其他不被接受的行為。 + +任何維護團隊認為有必要且適合的所有投訴都將進行審查及調查,並做出相對應的回應。專案小組有對事件回報者有保密的義務。具體執行的方針近一步細節可能會單獨公佈。 + +沒有真誠的遵守或是執行本行為準則的專案維護人員,可能會因專案領導人或是其他成員的決定,暫時或是永久的取消其身份。 + +## 來源 + +本行為準則改編自[貢獻者公約][首頁],版本 1.4 +可在此觀看https://www.contributor-covenant.org/zh-tw/version/1/4/code-of-conduct.html + +[首頁]: https://www.contributor-covenant.org diff --git a/README.md b/README.md index 577528e7aaf45ce002467590ec66b19afb145920..d06375a444dd65675bdd75baccf8445c1638a87c 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl - Optimized math operations through SSE/AVX intrinsics, BLAS libraries (e.g. MKL, OpenBLAS, cuBLAS) or customized CPU/GPU kernels. + - Optimized CNN networks through MKL-DNN library. - Highly optimized recurrent networks which can handle **variable-length** sequence without padding. - Optimized local and distributed training for models with high dimensional diff --git a/adversarial/advbox/attacks/base.py b/adversarial/advbox/attacks/base.py index 98a65f2fddff999ac6fa98a5733128a63a60f916..000baa48f626c7dddce49502d20c499f6424cd06 100644 --- a/adversarial/advbox/attacks/base.py +++ b/adversarial/advbox/attacks/base.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ The base model of the model. """ diff --git a/adversarial/advbox/attacks/gradientsign.py b/adversarial/advbox/attacks/gradientsign.py index 15b1d176cb11330ac290d73aec1419a3d8f3cc4c..cc26ffb69020a87f559c537f03de84f7c2bea2de 100644 --- a/adversarial/advbox/attacks/gradientsign.py +++ b/adversarial/advbox/attacks/gradientsign.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ This module provide the attack method for FGSM's implement. """ @@ -36,3 +49,39 @@ class GradientSignAttack(Attack): FGSM = GradientSignAttack + + +class IteratorGradientSignAttack(Attack): + """ + This attack was originally implemented by Alexey Kurakin(Google Brain). + Paper link: https://arxiv.org/pdf/1607.02533.pdf + """ + + def _apply(self, image_label, epsilons=100, steps=10): + """ + Apply the iterative gradient sign attack. + Args: + image_label(list): The image and label tuple list of one element. + epsilons(list|tuple|int): The epsilon (input variation parameter). + steps(int): The number of iterator steps. + Return: + numpy.ndarray: The adversarail sample generated by the algorithm. + """ + assert len(image_label) == 1 + pre_label = np.argmax(self.model.predict(image_label)) + gradient = self.model.gradient(image_label) + min_, max_ = self.model.bounds() + + if not isinstance(epsilons, Iterable): + epsilons = np.linspace(0, 1, num=epsilons + 1) + + for epsilon in epsilons: + adv_img = image_label[0][0].reshape(gradient.shape) + for _ in range(steps): + gradient = self.model.gradient([(adv_img, image_label[0][1])]) + gradient_sign = np.sign(gradient) * (max_ - min_) + adv_img = adv_img + epsilon * gradient_sign + adv_img = np.clip(adv_img, min_, max_) + adv_label = np.argmax(self.model.predict([(adv_img, 0)])) + if pre_label != adv_label: + return adv_img diff --git a/adversarial/advbox/models/base.py b/adversarial/advbox/models/base.py index 74e1045def7648b4a8df30e89312d73c0d4fe7e1..084e563f7b423f54f350b90649b04acc17b2db97 100644 --- a/adversarial/advbox/models/base.py +++ b/adversarial/advbox/models/base.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ The base model of the model. """ diff --git a/adversarial/advbox/models/paddle.py b/adversarial/advbox/models/paddle.py index 33b2a3d5c6973470fb25c98872cd53b3ff11bab4..4048b47f897000c1b004cb05f8bbca985d5bbbb8 100644 --- a/adversarial/advbox/models/paddle.py +++ b/adversarial/advbox/models/paddle.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import absolute_import import numpy as np diff --git a/adversarial/fluid_mnist.py b/adversarial/fluid_mnist.py index db4d4b51868ffa8be13d4d57a40e1def7e25d1a8..f8c7fe8d0ef6a6b0756ef73c14d8937b9cd1a738 100644 --- a/adversarial/fluid_mnist.py +++ b/adversarial/fluid_mnist.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ CNN on mnist data using fluid api of paddlepaddle """ diff --git a/adversarial/mnist_tutorial_fgsm.py b/adversarial/mnist_tutorial_fgsm.py index 8b29346b8cd7f643771640afc4f783f7544cd071..c63e030cd826abe24eacab21394c612b7c2d9495 100644 --- a/adversarial/mnist_tutorial_fgsm.py +++ b/adversarial/mnist_tutorial_fgsm.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ FGSM demos on mnist using advbox tool. """ diff --git a/benchmark/paddle/image/alexnet.py b/benchmark/paddle/image/alexnet.py index cad6051f1413a5bb95f87a940f3aa81e49e5d282..07f478d8fa4e1ac4c584d4c410f75555c7926d4b 100644 --- a/benchmark/paddle/image/alexnet.py +++ b/benchmark/paddle/image/alexnet.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
#!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/paddle/image/googlenet.py b/benchmark/paddle/image/googlenet.py index 2a850ccb7f2c75b467554181fc5f4aa8f2b97a09..3241be9c5f56d3e3b422081e26878ae690a33268 100644 --- a/benchmark/paddle/image/googlenet.py +++ b/benchmark/paddle/image/googlenet.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/paddle/image/provider.py b/benchmark/paddle/image/provider.py index 1018ec9ce1e529f618ddd7b7afa72a84c5e876a1..220c4bee35c89e90d7ba5edbb9b92632a0215811 100644 --- a/benchmark/paddle/image/provider.py +++ b/benchmark/paddle/image/provider.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import io, os import random import numpy as np diff --git a/benchmark/paddle/image/resnet.py b/benchmark/paddle/image/resnet.py index 2846e4763f1cda4602f03af5ec649d57ee6cf0d8..acc6d31d4bb4eeca3e1d98e7b42ea8a2bd8bc90b 100644 --- a/benchmark/paddle/image/resnet.py +++ b/benchmark/paddle/image/resnet.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/paddle/image/smallnet_mnist_cifar.py b/benchmark/paddle/image/smallnet_mnist_cifar.py index 58879c454f37991405d83bbb593bb5d1e977ff53..64a5da3220bf3294b88c015fe133e7b04573d954 100644 --- a/benchmark/paddle/image/smallnet_mnist_cifar.py +++ b/benchmark/paddle/image/smallnet_mnist_cifar.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/paddle/image/vgg.py b/benchmark/paddle/image/vgg.py index ca0a6798fb8c35b68cf84d263855955eb93ba0b0..a357207a6282aa7864a7ecffa8c54e7b2360b45e 100644 --- a/benchmark/paddle/image/vgg.py +++ b/benchmark/paddle/image/vgg.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/paddle/rnn/provider.py b/benchmark/paddle/rnn/provider.py index 928ca75daf84ccebb775364b0be0d8b3d5eebff9..c03df3a0026447d5ab239f90600975751d287799 100644 --- a/benchmark/paddle/rnn/provider.py +++ b/benchmark/paddle/rnn/provider.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import io, os import random import numpy as np diff --git a/benchmark/paddle/rnn/rnn.py b/benchmark/paddle/rnn/rnn.py index 83eb3e565473f7e7e91cddeaa3cd2aafb7e3df2c..97005f2c351ff2301a1b834e79931bb6ccc7abc8 100755 --- a/benchmark/paddle/rnn/rnn.py +++ b/benchmark/paddle/rnn/rnn.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
#!/usr/bin/env python from paddle.trainer_config_helpers import * diff --git a/benchmark/tensorflow/image/alexnet.py b/benchmark/tensorflow/image/alexnet.py index f6a39ef778e21bee7374718a1b1ddf43392825a8..edf462e6a188f4a83d7276086fcacfb644b562a1 100644 --- a/benchmark/tensorflow/image/alexnet.py +++ b/benchmark/tensorflow/image/alexnet.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from six.moves import xrange # pylint: disable=redefined-builtin from datetime import datetime import math diff --git a/benchmark/tensorflow/image/alexnet_multi_gpu.py b/benchmark/tensorflow/image/alexnet_multi_gpu.py index 7b5ee78f4dd5429abd85d75c092a6e3a2a39f922..90b8f16bca027fd14a62c2e65bdb606c6b7d79d9 100644 --- a/benchmark/tensorflow/image/alexnet_multi_gpu.py +++ b/benchmark/tensorflow/image/alexnet_multi_gpu.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from six.moves import xrange # pylint: disable=redefined-builtin from datetime import datetime import math diff --git a/benchmark/tensorflow/image/googlenet.py b/benchmark/tensorflow/image/googlenet.py index decf855b54451efba5f6a7868fbcf631789f3572..55431eceb3ce41c05fc5fb2d417135494adf3b2c 100644 --- a/benchmark/tensorflow/image/googlenet.py +++ b/benchmark/tensorflow/image/googlenet.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from six.moves import xrange from datetime import datetime import math diff --git a/benchmark/tensorflow/image/googlenet_multi_gpu.py b/benchmark/tensorflow/image/googlenet_multi_gpu.py index 31466faa37c47c66e4fe4628e28c867875e89f2e..44de3800a8a42d90debae2c567795789f3eb0a7d 100644 --- a/benchmark/tensorflow/image/googlenet_multi_gpu.py +++ b/benchmark/tensorflow/image/googlenet_multi_gpu.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from six.moves import xrange # pylint: disable=redefined-builtin from datetime import datetime import math diff --git a/benchmark/tensorflow/image/smallnet_mnist_cifar.py b/benchmark/tensorflow/image/smallnet_mnist_cifar.py index 1a625134a6c58586b29190ede9c66253f484d2cf..0858b5f9c9c60264c0427c9e1fbdfcd167cf418e 100644 --- a/benchmark/tensorflow/image/smallnet_mnist_cifar.py +++ b/benchmark/tensorflow/image/smallnet_mnist_cifar.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from six.moves import xrange # pylint: disable=redefined-builtin from datetime import datetime import math diff --git a/benchmark/tensorflow/rnn/reader.py b/benchmark/tensorflow/rnn/reader.py index f538329a15ea9ad9293c97c94340989e2c421eb2..710940c9ae24eb307ed864aa2b17d66c1d6ee29b 100755 --- a/benchmark/tensorflow/rnn/reader.py +++ b/benchmark/tensorflow/rnn/reader.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os.path import io import numpy as np diff --git a/benchmark/tensorflow/rnn/rnn.py b/benchmark/tensorflow/rnn/rnn.py index f288083e13656563b511980553245142efec4e65..507481b9ccd3a037d396f91bb860255a343905c5 100755 --- a/benchmark/tensorflow/rnn/rnn.py +++ b/benchmark/tensorflow/rnn/rnn.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
#!/usr/bin/env python from six.moves import xrange # pylint: disable=redefined-builtin import math diff --git a/benchmark/tensorflow/rnn/rnn_multi_gpu.py b/benchmark/tensorflow/rnn/rnn_multi_gpu.py index eabee4fa8fe6325212ace1c11be4862cd2720b08..f24cbaef62d94f2d94c243bd3c8ffcf1dcec7614 100755 --- a/benchmark/tensorflow/rnn/rnn_multi_gpu.py +++ b/benchmark/tensorflow/rnn/rnn_multi_gpu.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python from six.moves import xrange # pylint: disable=redefined-builtin import re diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index abee6698e30b7e76ca42825ed225876bf2ba5ec0..79b2449fe6689993bbee8a24ae7c46b452afe0a0 100644 --- a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -33,7 +33,7 @@ ExternalProject_Add( extern_grpc DEPENDS protobuf zlib GIT_REPOSITORY "https://github.com/grpc/grpc.git" - GIT_TAG "v1.7.x" + GIT_TAG "v1.8.x" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/make_resource.py b/cmake/make_resource.py index a9241b0e3e36c2e79c79e46b4f9114b7f6947341..d71e82eca2cd10179d5ec498f7fb2aa5da679c9f 100644 --- a/cmake/make_resource.py +++ b/cmake/make_resource.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os import re import sys diff --git a/doc/api/v1/data_provider/src/mnist_config.py b/doc/api/v1/data_provider/src/mnist_config.py index 429338c57f8f865f0c5835d933445b65ee2ea7aa..427e0465a68b630ff8a14337e326777f41b6481a 100644 --- a/doc/api/v1/data_provider/src/mnist_config.py +++ b/doc/api/v1/data_provider/src/mnist_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * define_py_data_sources2( diff --git a/doc/api/v1/data_provider/src/mnist_provider.dict.py b/doc/api/v1/data_provider/src/mnist_provider.dict.py index 2ba0b126a0d6239f84950e130410aaaa6e1f24cd..3fbb783e2f66273ce79c6736a00d01ec58514bc9 100644 --- a/doc/api/v1/data_provider/src/mnist_provider.dict.py +++ b/doc/api/v1/data_provider/src/mnist_provider.dict.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer.PyDataProvider2 import * diff --git a/doc/api/v1/data_provider/src/sentimental_config.py b/doc/api/v1/data_provider/src/sentimental_config.py index 7ce71608a2372b2484ae40ccf01f0621728ddef2..edbf3cf1400f5fde5dfa225e5bdbb60400a0691c 100644 --- a/doc/api/v1/data_provider/src/sentimental_config.py +++ b/doc/api/v1/data_provider/src/sentimental_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * dictionary = dict() diff --git a/doc/api/v1/data_provider/src/sentimental_provider.py b/doc/api/v1/data_provider/src/sentimental_provider.py index 14bd0e05a921dbfd5212d8483524d3af3e4ae98f..03ad1fe7d8c66233b078f94aa303e27cffb8e83c 100644 --- a/doc/api/v1/data_provider/src/sentimental_provider.py +++ b/doc/api/v1/data_provider/src/sentimental_provider.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer.PyDataProvider2 import * diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index 24bdf08fffd176a799fd12680f4651bb4bd0c9a9..0eb531cf021bd63c5ad93d9a1d12e9be811e4407 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -358,6 +358,18 @@ reduce_min .. autofunction:: paddle.v2.fluid.layers.reduce_min :noindex: + +split +----- +.. autofunction:: paddle.v2.fluid.layers.split + :noindex: + + +matmul +------ +.. 
autofunction:: paddle.v2.fluid.layers.matmul + :noindex: + logsigmoid ---------- .. autofunction:: paddle.v2.fluid.layers.logsigmoid @@ -487,3 +499,8 @@ swish ------ .. autofunction:: paddle.v2.fluid.layers.swish :noindex: + +l2_normalize +------------ +.. autofunction:: paddle.v2.fluid.layers.l2_normalize + :noindex: diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/v2/fluid/nets.rst index b792efb71f85ae643df655568da69c82414e9d5d..f6b1cb4ba10659fb336899f08376c265c67290f1 100644 --- a/doc/api/v2/fluid/nets.rst +++ b/doc/api/v2/fluid/nets.rst @@ -20,3 +20,14 @@ sequence_conv_pool :noindex: +glu +--- +.. autofunction:: paddle.v2.fluid.nets.glu + :noindex: + + +dot_product_attention +--------------------- +.. autofunction:: paddle.v2.fluid.nets.dot_product_attention + :noindex: + diff --git a/doc/faq/local/src/reduce_min_pool_size.py b/doc/faq/local/src/reduce_min_pool_size.py index 5715397cc11e18246b8522fcc5b4f05780c9a0a7..96073633d2b45bf83927b8cf446919fe916438c2 100644 --- a/doc/faq/local/src/reduce_min_pool_size.py +++ b/doc/faq/local/src/reduce_min_pool_size.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. @provider(min_pool_size=0, ...) def process(settings, filename): os.system('shuf %s > %s.shuf' % (filename, filename)) # shuffle before. diff --git a/doc/faq/local/src/word2vec_config.py b/doc/faq/local/src/word2vec_config.py index 866b40c3d4c96c1213b3f716f29b14dd38763edb..03619b2628ffca6166e8784222b7ea0196194b82 100644 --- a/doc/faq/local/src/word2vec_config.py +++ b/doc/faq/local/src/word2vec_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. ... # the settings and define data provider is omitted. DICT_DIM = 3000 # dictionary dimension. word_ids = data_layer('word_ids', size=DICT_DIM) diff --git a/doc/faq/local/src/word2vec_dataprovider.py b/doc/faq/local/src/word2vec_dataprovider.py index ec2753a7d01d7dd4d804c3bed0bac1be9c3fb3d3..a439a8f52ebc13ef281012e647834fd53a924d74 100644 --- a/doc/faq/local/src/word2vec_dataprovider.py +++ b/doc/faq/local/src/word2vec_dataprovider.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. DICT_DIM = 3000 diff --git a/doc/getstarted/concepts/src/infer.py b/doc/getstarted/concepts/src/infer.py index 4cc58dfee0bd6dade0340b4fd0ee1adb49ffebf6..ee71cd7a9a4fbddb93fa3aa2d9349f01f3673982 100644 --- a/doc/getstarted/concepts/src/infer.py +++ b/doc/getstarted/concepts/src/infer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2 as paddle import numpy as np diff --git a/doc/getstarted/concepts/src/train.py b/doc/getstarted/concepts/src/train.py index 4bccbfca3c70c12aec564e2cae3b8ca174b68777..d9c0c66b8a7bbb66d7b66cce38220a4c62fd6849 100644 --- a/doc/getstarted/concepts/src/train.py +++ b/doc/getstarted/concepts/src/train.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2 as paddle import numpy as np diff --git a/doc/howto/usage/capi/organization_of_the_inputs_cn.md b/doc/howto/usage/capi/organization_of_the_inputs_cn.md index 563ec5ca21ec5d75800fa201943d65e6d6fe51ea..a889ae4ffab7be02468b4a5ac5a18e3cc77803c9 100644 --- a/doc/howto/usage/capi/organization_of_the_inputs_cn.md +++ b/doc/howto/usage/capi/organization_of_the_inputs_cn.md @@ -19,7 +19,7 @@ ### 基本使用概念 -- 在PaddlePaddle内部,神经网络中一个计算层的输入/输出被组织为一个 `Argument` 结构体,如果神经网络有多个输入或者多个输入,每一个输入/输入都会对应有自己的`Argument`。 +- 在PaddlePaddle内部,神经网络中一个计算层的输入/输出被组织为一个 `Argument` 结构体,如果神经网络有多个输入或者多个输出,每一个输入/输出都会对应有自己的`Argument`。 - `Argument` 并不真正“存储”数据,而是将输入/输出信息有机地组织在一起。 - 在`Argument`内部由`IVector`(对应着上文提到的一维整型数组)和`Matrix`(对应着上文提到的二维浮点型矩阵)来实际存储数据;由 `Sequence Start Positions` (下文详细解释) 来描述输入/输出的序列信息。 diff --git a/doc/howto/usage/cluster/fluid_cluster_train_en.md b/doc/howto/usage/cluster/fluid_cluster_train_en.md new file mode 100644 index 0000000000000000000000000000000000000000..419eac51aa52c765a202856b3f1620e742b29cb6 --- /dev/null +++ b/doc/howto/usage/cluster/fluid_cluster_train_en.md @@ -0,0 +1,138 @@ +# Fluid Distributed Training + +## Introduction + +In this article, we'll explain how to config and run distributed training jobs with PaddlePaddle Fluid in a bare metal cluster. 
+ +## Preparations + +### Get your cluster ready + +Prepare your computer nodes in the cluster. Nodes in this cluster can be of any specification that runs PaddlePaddle, and each must have a unique IP address assigned to it. Make sure the nodes can communicate with each other. + +### Have PaddlePaddle installed + +PaddlePaddle must be installed on all nodes. If you have GPU cards on your nodes, be sure to properly install the drivers and CUDA libraries. + +The PaddlePaddle build and installation guide can be found [here](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/index_en.html). + +### Update training script + +#### Non-cluster training script + +Let's take the first chapter of [Deep Learning 101](http://www.paddlepaddle.org/docs/develop/book/01.fit_a_line/index.html), "fit a line", as an example. + +This demo's non-cluster version with the fluid API is as follows: + +``` python +import paddle.v2 as paddle +import paddle.v2.fluid as fluid + +x = fluid.layers.data(name='x', shape=[13], dtype='float32') +y_predict = fluid.layers.fc(input=x, size=1, act=None) +y = fluid.layers.data(name='y', shape=[1], dtype='float32') + +cost = fluid.layers.square_error_cost(input=y_predict, label=y) +avg_cost = fluid.layers.mean(x=cost) + +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +BATCH_SIZE = 20 + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe = fluid.Executor(place) + +exe.run(fluid.default_startup_program()) + +PASS_NUM = 100 +for pass_id in range(PASS_NUM): + fluid.io.save_persistables(exe, "./fit_a_line.model/") + fluid.io.load_persistables(exe, "./fit_a_line.model/") + for data in train_reader(): + avg_loss_value, = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) + + if avg_loss_value[0] < 10.0: + exit(0) # if avg cost less than 10.0, we think our code is good. +exit(1) +``` + +We created a simple fully connected neural network training program and handed it to the fluid executor to run for 100 passes. + +Now let's try to convert it to a distributed version to run in a cluster. + +#### Introducing parameter server + +As you can see from the non-cluster version of the training script, there is only one role in it: the trainer, which does the computing as well as holding the parameters. In cluster training, since multiple trainers are working on the same task, they need one centralized place to hold and distribute parameters. This centralized place is called the Parameter Server in PaddlePaddle. + +![parameter server architecture](src/trainer.png) + +The Parameter Server in fluid not only holds parameters but is also assigned a part of the program. Trainers communicate with parameter servers via send/receive OPs. For more technical detail, please refer to this [document](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/dist_refactor/distributed_architecture.md). + +Now we need to create programs for both the trainers and the parameter servers; the question is how? + +#### Slice the program + +Fluid provides a tool called "Distribute Transpiler" to automatically convert the non-cluster program into a cluster program. + +The idea behind this tool is to find the optimize OPs and gradient parameters, slice the program into 2 pieces, and connect them with send/receive OPs.
+ +The optimize OPs and gradient parameters can be found in the return values of the optimizer's minimize function. + +To put them together: + +``` python +... #define the program, cost, and create sgd optimizer + +optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) #get optimize OPs and gradient parameters + +t = fluid.DistributeTranspiler() # create transpiler instance +# slice the program into 2 pieces with optimizer_ops and gradient parameters list, as well as pserver_endpoints, which is a comma separated list of [IP:PORT] and number of trainers +t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + +... #create executor + +# in pserver, run this +exe.run(fluid.default_startup_program()) +#current_endpoint here means current pserver IP:PORT you wish to run on +exe.run(t.get_pserver_program(current_endpoint, optimize_ops)) + +# in trainer, run this +... # define data reader +exe.run(fluid.default_startup_program()) +for pass_id in range(100): + for data in train_reader(): + exe.run(t.get_trainer_program()) + + +``` + +### E2E demo + +Please find the complete demo [here](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py). On the parameter server node, run this in the command line: + +``` bash +PSERVERS=192.168.1.2:6174 SERVER_ENDPOINT=192.168.1.2:6174 TRAINING_ROLE=PSERVER python notest_dist_fit_a_line.py +``` + +*Please note that we assume your parameter server runs at 192.168.1.2:6174.* + +Wait until the prompt `Server listening on 192.168.1.2:6174` appears. + +Then on each of your 2 trainer nodes, run this: + +``` bash +PSERVERS=192.168.1.2:6174 SERVER_ENDPOINT=192.168.1.2:6174 TRAINING_ROLE=TRAINER python notest_dist_fit_a_line.py +``` + +*The reason you need to run this command on 2 nodes is that the script sets the trainer count to 2. You can change this setting on line 50.* + +Now you have 2 trainers and 1 parameter server up and running. diff --git a/doc/howto/usage/cluster/src/k8s_train/start_paddle.py b/doc/howto/usage/cluster/src/k8s_train/start_paddle.py index 935c12bb67e1fe08bc135a7a2220fcd43c548482..1774f8b640c5a2dee036db36b40123b6de7bf68c 100755 --- a/doc/howto/usage/cluster/src/k8s_train/start_paddle.py +++ b/doc/howto/usage/cluster/src/k8s_train/start_paddle.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/python # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2.py b/doc/howto/usage/cluster/src/word2vec/api_train_v2.py index c0940f0e56eafa22f8aeb7052c0ddc79d8862917..d449e02023f1ec48669ce734a24093aed031adc5 100644 --- a/doc/howto/usage/cluster/src/word2vec/api_train_v2.py +++ b/doc/howto/usage/cluster/src/word2vec/api_train_v2.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import gzip import math diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py b/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py index 2e6d8887124a5524505b097803a60a35478ca644..a5dd347f0b594e6037182bfad39a6a736fdbab66 100644 --- a/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py +++ b/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import math import os import paddle.v2 as paddle diff --git a/go/pserver/client/c/test/test_mnist.py b/go/pserver/client/c/test/test_mnist.py index c3a3af55e2812fa0c965d22ddaba198f43f3c4ad..7b50a10afc68b87f331c4d0afede9413b3aa2d35 100644 --- a/go/pserver/client/c/test/test_mnist.py +++ b/go/pserver/client/c/test/test_mnist.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2 as paddle import gzip diff --git a/go/pserver/client/c/test/test_train.py b/go/pserver/client/c/test/test_train.py index 8d9c6b9b20f515ed0865df8cf46b6dfc2d8ffa34..7ef0fca496e8a3836d1a38b0ff576652d72ce177 100644 --- a/go/pserver/client/c/test/test_train.py +++ b/go/pserver/client/c/test/test_train.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import paddle.v2 as paddle import paddle.v2.dataset.uci_housing as uci_housing import paddle.v2.master as master diff --git a/paddle/api/test/testTrainConfig.py b/paddle/api/test/testTrainConfig.py index 77e0cd37d566d2571fada76b9948a9b0616ad044..ab9a83e4a35305bcab42a5e05d8f4880f4edd9bb 100644 --- a/paddle/api/test/testTrainConfig.py +++ b/paddle/api/test/testTrainConfig.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=100, learning_method=AdamOptimizer()) diff --git a/paddle/capi/examples/model_inference/common/common.h b/paddle/capi/examples/model_inference/common/common.h index e32f2f9836f63ba10ef5be447a4c41514e079219..9efcbc387e6080f8c9ed284412d1abced658a0cd 100644 --- a/paddle/capi/examples/model_inference/common/common.h +++ b/paddle/capi/examples/model_inference/common/common.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #ifndef __CAPI_EXAMPLE_COMMON_H__ #define __CAPI_EXAMPLE_COMMON_H__ #include diff --git a/paddle/capi/examples/model_inference/dense/merge_v2_model.py b/paddle/capi/examples/model_inference/dense/merge_v2_model.py index c030d572cbdb15cb5e90f2685723a81efb230f81..760a485a53f5edd31622a0f25a7bf32e6230e9a4 100644 --- a/paddle/capi/examples/model_inference/dense/merge_v2_model.py +++ b/paddle/capi/examples/model_inference/dense/merge_v2_model.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.utils.merge_model import merge_v2_model from mnist_v2 import network diff --git a/paddle/capi/examples/model_inference/dense/mnist_v2.py b/paddle/capi/examples/model_inference/dense/mnist_v2.py index ee28111153ca2cf24b9789452c65a0f4c7b64538..174436bd1d7c1efaf8b25b002ada38b6babeba1c 100644 --- a/paddle/capi/examples/model_inference/dense/mnist_v2.py +++ b/paddle/capi/examples/model_inference/dense/mnist_v2.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os import sys import gzip diff --git a/paddle/capi/examples/model_inference/dense/trainer_config.py b/paddle/capi/examples/model_inference/dense/trainer_config.py index 873ec119e7a3d4debe50af2ba259ace50b0cbf7c..fbf08903578b309ee163b1f4c70b80133a4324b8 100644 --- a/paddle/capi/examples/model_inference/dense/trainer_config.py +++ b/paddle/capi/examples/model_inference/dense/trainer_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * img = data_layer(name='pixel', size=784) diff --git a/paddle/capi/examples/model_inference/sequence/trainer_config.py b/paddle/capi/examples/model_inference/sequence/trainer_config.py index 6bbc7a909aa03950ce621efa43fa47d9cdd016f8..c1326bb95550dc3a7ac6385a81158b5bbd88d5af 100644 --- a/paddle/capi/examples/model_inference/sequence/trainer_config.py +++ b/paddle/capi/examples/model_inference/sequence/trainer_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * WORD_DIM = 3000 diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/capi/tests/test_predict_network.py index 82ef5cb1a70398df65ace3c802076743c3ebe341..46a985d47652faa573d1b9817952a2b81d0db8fc 100644 --- a/paddle/capi/tests/test_predict_network.py +++ b/paddle/capi/tests/test_predict_network.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=100) diff --git a/paddle/cuda/src/avx_mathfun.h b/paddle/cuda/src/avx_mathfun.h index 2412ed5abc13b2a83521a75524f581e106788b60..a0ba71faba9131f6f3f031c8276c3851090fac45 100644 --- a/paddle/cuda/src/avx_mathfun.h +++ b/paddle/cuda/src/avx_mathfun.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* AVX implementation of sin, cos, sincos, exp and log diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 692406b1c37d0c02714eafb5cf9a28329ed873bc..72743b5fd0b32479ccbf28fbf98032df8fa371e9 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#include "paddle/framework/backward.h" diff --git a/paddle/framework/dim.h b/paddle/framework/dim.h index 04d4b0e604e6f73ad94e0ca79d6b69f663bd4076..ec17d7c6156351d21a4f9431f85fb0bcf00e4331 100644 --- a/paddle/framework/dim.h +++ b/paddle/framework/dim.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #pragma once #include diff --git a/paddle/framework/dim_test.cu b/paddle/framework/dim_test.cu index 0a6a87669c900de6cb507dd48f0cfc871defe279..2bcab7c5c2e454e86a148fde003164d369c46ef2 100644 --- a/paddle/framework/dim_test.cu +++ b/paddle/framework/dim_test.cu @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include #include diff --git a/paddle/framework/eigen_test.cc b/paddle/framework/eigen_test.cc index bc4a2db32cfba66bef2c444e1f822e0d2a57b91e..c6f77ecfabdddbfdd1df646a1c9310c03930bd8e 100644 --- a/paddle/framework/eigen_test.cc +++ b/paddle/framework/eigen_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 87a57d095141cc456af2cbabbc227715a02375e9..3e239e9911d03a43987825ffa7824298a748ebda 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -135,6 +135,65 @@ bool operator==(const LoD &a, const LoD &b) { return true; } +bool CheckLoD(const LoD &in, int tensor_height) { + if (in.empty()) return true; + for (const auto &level : in) { + // check: there should be more than 2 offsets existing in each level. + if (level.size() < 2) return false; + // check: the first offset(the begin offset) of each level should be 0. 
+ if (level.front() != 0) return false; + // check: all the offsets in a level should be ascending(no same items + // allows). + if (!std::is_sorted(level.begin(), level.begin(), [](size_t a, size_t b) { + if (a < b) return true; + return false; + })) { + LOG(INFO) << "ascending error"; + return false; + } + } + // check: the lowest level's last offset should equals `tensor_height` if + // tensor_height>0. + if (tensor_height > 0 && (size_t)tensor_height != in.back().back()) + return false; + + // check: the higher level's last offset should equals the lower level's + // size-1. + // NOTE LoD store the levels from top to bottom, so the higher level goes + // first. + for (size_t level = 0; level < in.size() - 1; level++) { + if (in[level].back() != in[level + 1].size() - 1) return false; + } + return true; +} + +bool CheckAbsLoD(const LoD &in, int tensor_height) { + if (in.empty()) return true; + for (const auto &level : in) { + // check: all the offsets in a level should be ascending(no same items + // allows). + if (!std::is_sorted(level.begin(), level.begin(), [](size_t a, size_t b) { + if (a < b) return true; + return false; + })) { + return false; + } + + // check: there should be more than 2 offsets existing in each level. + if (level.size() < 2) return false; + + // check: the first offset of each level should be 0, and the last should be + // the same(the height of underlying tensor). + if (level.front() != 0) return false; + if (tensor_height < 0) { + tensor_height = level.back(); + } else if ((size_t)tensor_height != level.back()) { + return false; + } + } + return true; +} + using LoDAndOffset = std::pair>; LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx, size_t end_idx, size_t start_level) { @@ -232,23 +291,32 @@ std::vector LoDTensor::SplitLoDTensor( const std::vector places) const { check_memory_size(); PADDLE_ENFORCE(lod().empty(), "Disable parallel lod for now"); - PADDLE_ENFORCE(dims()[0] % places.size() == 0, - "Batch size should be divided by places size"); - - std::vector lods; - for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) { - int begin = place_idx * dims()[0] / places.size(); - int end = (place_idx + 1) * dims()[0] / places.size(); + size_t result_size = std::min(static_cast(dims()[0]), places.size()); + size_t remainder = dims()[0] % places.size(); + + std::vector results; + results.reserve(result_size); + + int step_width = static_cast(dims()[0] / result_size); + for (size_t i = 0; i < result_size; ++i) { + int begin = static_cast(i * step_width); + int end = static_cast((i + 1) * step_width); + if (i + 1 == places.size()) { // last + end += remainder; + } auto src = Slice(begin, end); - auto &dst_place = places[place_idx]; + auto &dst_place = places[i]; LoDTensor dst; - framework::Copy(src, dst_place, &dst); - - lods.emplace_back(dst); + if (!(dst_place == place())) { + framework::Copy(src, dst_place, &dst); + } else { // It is no need to copy if src_place and dst_place are same. 
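The two validators implemented above are exercised by unit tests later in this patch; as a minimal standalone sketch of their intended use (assuming the paddle/framework headers are on the include path, with values mirroring the new tests):

#include <vector>
#include "paddle/framework/lod_tensor.h"

void CheckLoDSketch() {
  using paddle::framework::LoD;
  LoD lod;
  lod.push_back(std::vector<size_t>({0, 2}));        // level 0: 2 sequences
  lod.push_back(std::vector<size_t>({0, 1, 3}));     // level 1: 3 sub-sequences
  lod.push_back(std::vector<size_t>({0, 2, 4, 5}));  // level 2: offsets into 5 rows

  // Relative offsets: the lowest level must end at the tensor height if one is given.
  bool ok = paddle::framework::CheckLoD(lod, /*tensor_height=*/5);   // true
  bool bad = paddle::framework::CheckLoD(lod, /*tensor_height=*/9);  // false

  // Absolute offsets: every level must end at the underlying tensor height.
  auto abs_lod = paddle::framework::ToAbsOffset(lod);
  bool abs_ok = paddle::framework::CheckAbsLoD(abs_lod);             // true
  (void)ok; (void)bad; (void)abs_ok;
}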
+ dst.ShareDataWith(src); + } + results.emplace_back(dst); } - return lods; + return results; } // TODO(tonyyang-svail): make this function support LoD @@ -259,12 +327,17 @@ void LoDTensor::MergeLoDTensor( framework::DDim new_dim = lod_tensors[0]->dims(); std::type_index new_type = lod_tensors[0]->type(); auto new_layout = lod_tensors[0]->layout(); + int64_t new_height = 0; for (auto *lod : lod_tensors) { - PADDLE_ENFORCE(new_dim == lod->dims()); - PADDLE_ENFORCE(new_type == lod->type()); - PADDLE_ENFORCE(new_layout == lod->layout()); + new_height += lod->dims()[0]; + for (int i = 1; i < new_dim.size(); ++i) { + PADDLE_ENFORCE_EQ(new_dim[i], lod->dims()[i]); + } + + PADDLE_ENFORCE_EQ(new_type, lod->type()); + PADDLE_ENFORCE_EQ(new_layout, lod->layout()); } - new_dim[0] *= lod_tensors.size(); + new_dim[0] = new_height; Resize(new_dim); set_layout(new_layout); diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 88ea78f2682b2ffc962c9663f6b3c636dedb931d..9d1294fdeb9bd76bf944f7ec3687e3c5bb333241 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -71,6 +71,38 @@ LoD ToAbsOffset(const LoD& in); bool operator==(const LoD& a, const LoD& b); +/* + * Check whether this lod's format is valid. + * + * ATTENTION: + * - Empty lod is treated as valid. + * + * It will check two things: + * + * 1. all the offsets in a level should be ascending(no same items allows). + * 2. there should be more than 2 offsets existing in each level. + * 3. the higher level's last offset should equals the lower level's size-1. + * 4. the first offset(the begin offset) of each level should be 0. + * 5. the lowest level's last offset should equals `tensor_height` if + * tensor_height>0. + */ + +bool CheckLoD(const LoD& in, int tensor_height = -1); +/* + * Check whether this absolute lod's format is valid. + * + * ATTENTION: + * - Empty lod is treated as valid. + * + * It will check two things: + * 1. all the offsets in a level should be ascending(no same items allows) + * 2. there should be more than 2 offsets existing in each level. + * 3. the first offset of each level should be 0, and the last should be the + * same(the height of underlying tensor) or `tensor_height` if + * tensor_height>0. + */ +bool CheckAbsLoD(const LoD& in, int tensor_height = -1); + /* * LoDTensor (Level of details Tensor) * see https://en.wikipedia.org/wiki/Level_of_details for reference. diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index baad9c6f98ac135c3650fe3113522850328c1298..9c7ad6c7b47952bd137eeedf302e2b9182fe8279 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -1,15 +1,16 @@ -/* - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
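The rewritten SplitLoDTensor above drops the requirement that the batch dimension divide evenly across places: each slice gets floor(height / result_size) rows, the last slice absorbs the remainder, and the copy is skipped when source and destination places coincide; MergeLoDTensor correspondingly sums the slice heights instead of multiplying one dimension by the tensor count. A small worked example of the partition arithmetic (illustrative numbers only):

// Splitting a tensor with dims()[0] == 10 across 3 places:
//   result_size = min(10, 3) = 3
//   step_width  = 10 / 3     = 3
//   remainder   = 10 % 3     = 1
//   slice 0 -> rows [0, 3)
//   slice 1 -> rows [3, 6)
//   slice 2 -> rows [6, 9), plus remainder -> rows [6, 10)
// MergeLoDTensor then recovers the original height as 3 + 3 + 4 = 10.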
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/lod_tensor.h" @@ -22,38 +23,6 @@ namespace paddle { namespace framework { -const int kLodTensorSize = 20 * 128; - -class LoDTensorTester : public ::testing::Test { - public: - virtual void SetUp() override { - // tensor's batch_size: 30 - // 3 levels - // 0 10 20 - // 0 5 10 15 20 - // 0 2 5 7 10 12 15 20 - LoD lod; - lod.push_back(std::vector{0, 2, 3}); - lod.push_back(std::vector{0, 2, 5, 8}); - lod.push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); - - ASSERT_EQ(lod.size(), 3UL); - - lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/}); - // malloc memory - float* dst_ptr = lod_tensor_.mutable_data(place); - for (int i = 0; i < kLodTensorSize; ++i) { - dst_ptr[i] = i; - } - - lod_tensor_.set_lod(lod); - } - - protected: - platform::CPUPlace place; - LoDTensor lod_tensor_; -}; - TEST(LodExpand, test) { LoD lod{{0, 2}}; LoDTensor tensor; @@ -131,5 +100,53 @@ TEST(LoD, ToAbsOffset) { EXPECT_EQ(abs_lod, expected); } +TEST(LoD, CheckLoD) { + LoD relative_lod; + relative_lod.push_back(std::vector({0, 2})); + relative_lod.push_back(std::vector({0, 1, 3})); + relative_lod.push_back(std::vector({0, 2, 4, 5})); + + // check compatible + ASSERT_TRUE(CheckLoD(relative_lod)); + relative_lod[1].back()++; + ASSERT_FALSE(CheckLoD(relative_lod)); + relative_lod[1].back()--; // recover it + + // check empty + LoD empty_lod; + ASSERT_TRUE(CheckLoD(empty_lod)); + + // check less than 2 offsets in a level + LoD some_lod0; + some_lod0.push_back(std::vector({0})); + ASSERT_FALSE(CheckLoD(some_lod0)); + + // check with underlying tensor storage. + ASSERT_TRUE(CheckLoD(relative_lod, 5)); + ASSERT_FALSE(CheckLoD(relative_lod, 9)); +} + +TEST(LoD, CheckAbsLoD) { + LoD relative_lod; + relative_lod.push_back(std::vector({0, 2})); + relative_lod.push_back(std::vector({0, 1, 3})); + relative_lod.push_back(std::vector({0, 2, 4, 5})); + + auto abs_lod = ToAbsOffset(relative_lod); + + ASSERT_TRUE(CheckAbsLoD(abs_lod)); + + // check less than 2 offsets in a level. + + // check the last item should be compatible with tensor height. + abs_lod.back().back()++; + ASSERT_FALSE(CheckAbsLoD(abs_lod)); + abs_lod.back().back()--; // restore + + // check less than 2 offsets in a lod. + LoD abs_lod0; + abs_lod0.push_back(std::vector({0})); + ASSERT_FALSE(CheckAbsLoD(abs_lod0)); +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index e8508ad2658ae850e4c98aa798b5db6d007e67d0..0f46e9b1e3966a49ff0673231c65f608797c80fe 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index d75c0233e8e0134ddf4edc50c07490a234b65cd0..5de9ae559c435439f30931c7840e54e0d2bb744c 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -177,16 +177,16 @@ class OpKernelRegistrar : public Registrar { /** * Macro to register OperatorKernel. */ -#define REGISTER_OP_KERNEL(op_type, DEVICE_TYPE, place_class, ...) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - __reg_op_kernel_##op_type##_##DEVICE_TYPE##__, \ - "REGISTER_OP_KERNEL must be called in global namespace"); \ - static ::paddle::framework::OpKernelRegistrar \ - __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type, \ - #DEVICE_TYPE); \ - int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() { \ - __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__.Touch(); \ - return 0; \ +#define REGISTER_OP_KERNEL(op_type, LIBRARY_TYPE, place_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op_kernel_##op_type##_##LIBRARY_TYPE##__, \ + "REGISTER_OP_KERNEL must be called in global namespace"); \ + static ::paddle::framework::OpKernelRegistrar \ + __op_kernel_registrar_##op_type##_##LIBRARY_TYPE##__(#op_type, \ + #LIBRARY_TYPE); \ + int TouchOpKernelRegistrar_##op_type##_##LIBRARY_TYPE() { \ + __op_kernel_registrar_##op_type##_##LIBRARY_TYPE##__.Touch(); \ + return 0; \ } #define REGISTER_OP_CUDA_KERNEL(op_type, ...) \ @@ -208,14 +208,14 @@ class OpKernelRegistrar : public Registrar { static int use_op_itself_##op_type##_ __attribute__((unused)) = \ TouchOpRegistrar_##op_type() -#define USE_OP_DEVICE_KERNEL(op_type, DEVICE_TYPE) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - __use_op_kernel_##op_type##_##DEVICE_TYPE##__, \ - "USE_OP_DEVICE_KERNEL must be in global namespace"); \ - extern int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE(); \ - static int use_op_kernel_##op_type##_##DEVICE_TYPE##_ \ - __attribute__((unused)) = \ - TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() +#define USE_OP_DEVICE_KERNEL(op_type, LIBRARY_TYPE) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __use_op_kernel_##op_type##_##LIBRARY_TYPE##__, \ + "USE_OP_DEVICE_KERNEL must be in global namespace"); \ + extern int TouchOpKernelRegistrar_##op_type##_##LIBRARY_TYPE(); \ + static int use_op_kernel_##op_type##_##LIBRARY_TYPE##_ \ + __attribute__((unused)) = \ + TouchOpKernelRegistrar_##op_type##_##LIBRARY_TYPE() // TODO(fengjiayi): The following macros // seems ugly, do we have better method? diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index a1b4a03289eca4c8b9d8c23ede4221853cb31f79..c04cd38697f85b1d51aaa42b34a189f22bb4d5cc 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
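The second argument of REGISTER_OP_KERNEL / USE_OP_DEVICE_KERNEL is renamed from DEVICE_TYPE to LIBRARY_TYPE, presumably because it names the kernel library (plain device code, CUDNN, MKLDNN, ...) rather than the device itself; the macro expansion is otherwise unchanged. A hypothetical invocation, with the op and kernel names made up purely for illustration:

// In the translation unit that defines the kernel:
REGISTER_OP_KERNEL(my_op, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::MyOpCUDNNKernel<float>);

// In a translation unit that needs to force-link that kernel:
USE_OP_DEVICE_KERNEL(my_op, CUDNN);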
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h index 091b63bf0f907a5449f08f0e36abb6577fa5e43e..b49c61449984f51d65963958c87191b0799bcf5b 100644 --- a/paddle/framework/tensor_util.h +++ b/paddle/framework/tensor_util.h @@ -315,9 +315,8 @@ inline void DeserializeFromStream(std::istream& is, Tensor* tensor, desc.data_type(), DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace())); is.read(static_cast(buf), cpu_tensor.memory_size()); - auto cpu_place = new platform::CPUPlace(); - framework::Copy(cpu_tensor, *cpu_place, dev_ctx, tensor); - delete cpu_place; + auto dst_place = dev_ctx.GetPlace(); + framework::Copy(cpu_tensor, dst_place, dev_ctx, tensor); #else PADDLE_THROW("Unexpected branch"); #endif diff --git a/paddle/framework/tensor_util_test.cc b/paddle/framework/tensor_util_test.cc index 3636125f2052200238ff82d4f708b62224322cdf..f541927c0e7e044adeabee01e8ec91e7d8ef7baf 100644 --- a/paddle/framework/tensor_util_test.cc +++ b/paddle/framework/tensor_util_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index 36b76fb196cfd4c7b3697dcf0cda9a23ff53deb3..03992c8608693259224901b2f9c89d458f126d09 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.cpp b/paddle/gserver/layers/MKLDNNConcatLayer.cpp index 44bb0883b89c712d70e2d4fdfe16bdfde86f81b7..520ccc1a995e966de73080b61a8c20cbee722267 100644 --- a/paddle/gserver/layers/MKLDNNConcatLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConcatLayer.cpp @@ -43,7 +43,7 @@ void MKLDNNConcatLayer::reshape( channels_[0] = ic; oc = ic; for (size_t i = 1; i < inputLayers_.size(); i++) { - int batchsize, height, witdh; + int batchsize = 0, height = 0, witdh = 0; reshapeInput(batchsize, height, witdh, i); CHECK_EQ(bs, batchsize); CHECK_EQ(ih, height); @@ -84,6 +84,7 @@ void MKLDNNConcatLayer::resetFwdBuffers(std::vector& inputs, bool has8c = false, has16c = false, hasnc = false; for (size_t i = 0; i < inputs.size(); i++) { resetInValue(inputs[i], nullptr, i, channels_[i]); + inputs[i]->downSpatial(); CHECK(inputs[i]); auto dm = inputs[i]->getDims(); // inputs format can be different, but ndims must equal diff --git a/paddle/gserver/tests/img_conv_cudnn.py b/paddle/gserver/tests/img_conv_cudnn.py index 3934607fa41f9b6d401f1c9ff4aec6715786799b..0ea6d6bae66b0a307748bd0d0fa9a53ed5f7927d 100644 --- a/paddle/gserver/tests/img_conv_cudnn.py +++ b/paddle/gserver/tests/img_conv_cudnn.py @@ -1,17 +1,16 @@ -#edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/img_conv_exconv.py b/paddle/gserver/tests/img_conv_exconv.py index ad5a8ba2bde17000ca3d7057c6f399ae28d938b0..c618cdab27c52d70042b0a118f7f6fe935a6b9d7 100644 --- a/paddle/gserver/tests/img_conv_exconv.py +++ b/paddle/gserver/tests/img_conv_exconv.py @@ -1,17 +1,16 @@ -#edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/pyDataProvider.py b/paddle/gserver/tests/pyDataProvider.py index 7235a239439b7544805d1bd06dfb1a72c2e0e937..d2ad5888b5a4c79d8b663ce8c2f313184151beb6 100644 --- a/paddle/gserver/tests/pyDataProvider.py +++ b/paddle/gserver/tests/pyDataProvider.py @@ -1,17 +1,16 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy import struct import traceback diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/gserver/tests/rnn_data_provider.py index 913365a5a4037d14fcba1e1546508ba89668e0d6..063a4127e542d23012359a2eac0045bf69a51356 100644 --- a/paddle/gserver/tests/rnn_data_provider.py +++ b/paddle/gserver/tests/rnn_data_provider.py @@ -1,17 +1,16 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer.PyDataProvider2 import * # Note that each config should has an independent provider diff --git a/paddle/gserver/tests/sequenceGen.py b/paddle/gserver/tests/sequenceGen.py index fd725727c04677b5ea8918f6721f0c007e80915d..04a1732d61c8618984d16550acf7c94da1bd3578 100644 --- a/paddle/gserver/tests/sequenceGen.py +++ b/paddle/gserver/tests/sequenceGen.py @@ -1,17 +1,16 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os import sys diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py index 7303d088043d5096a3491d3b3b32b231bde09a0a..aeaaa221f9fab981af88cfd63c30349e1b02a0ee 100644 --- a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py +++ b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py @@ -1,18 +1,16 @@ -# edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * ######################## data source ################################ diff --git a/paddle/gserver/tests/sequence_recurrent.py b/paddle/gserver/tests/sequence_recurrent.py index 4895df186bfecc5cb5263676a9cd5bac5039d565..8786a5465db82d786d3772357b02ab837073a576 100644 --- a/paddle/gserver/tests/sequence_recurrent.py +++ b/paddle/gserver/tests/sequence_recurrent.py @@ -1,17 +1,16 @@ -#!/usr/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/sequence_recurrent_group.py b/paddle/gserver/tests/sequence_recurrent_group.py index a1d54542e3bc4e89f70d31d5e89c0f44953c9f90..8b5a3d49838c9bb49321a9d7514fc0241e6d67cd 100644 --- a/paddle/gserver/tests/sequence_recurrent_group.py +++ b/paddle/gserver/tests/sequence_recurrent_group.py @@ -1,18 +1,16 @@ -#!/usr/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from paddle.trainer_config_helpers import * ######################## data source ################################ diff --git a/paddle/gserver/tests/sequence_rnn_matched_inputs.py b/paddle/gserver/tests/sequence_rnn_matched_inputs.py index 59e8c91733c42b6f13f723321d21bca98ab78bb7..0c55f2cf9d07b194aa06f88892f831f1a9ce6436 100644 --- a/paddle/gserver/tests/sequence_rnn_matched_inputs.py +++ b/paddle/gserver/tests/sequence_rnn_matched_inputs.py @@ -1,17 +1,16 @@ -# edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py index 6fe9dca6e2cb0e14fee346b8307f67b804328471..22b376b91aa4736d16fead698105466d679dd248 100644 --- a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py +++ b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py @@ -1,17 +1,16 @@ -# edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py index 786a0c6d780e4e8deadb35e52901e42dae67a281..3ce87490bbd0f30a3c42b947b073adb2a6c5b51c 100644 --- a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py +++ b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py @@ -1,17 +1,16 @@ -#edit-mode: -*- python -*- -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index 0d0fe476ff5eac8bf8ad1c9fe09b32c1a8f73ebc..044aede98e684a432c48b3ea5bb82a4a677682d4 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -1,17 +1,16 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import random from paddle.trainer.PyDataProvider2 import * diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 2569535c257c3210c239b69cd464ae59a8f4747c..2412ebd82a02c872e73fd310c56221309441f630 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -149,7 +149,7 @@ op_library(sequence_pool_op DEPS sequence_pooling) op_library(lstm_op DEPS sequence2batch lstm_compute) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op DEPS executor) -op_library(warpctc_op DEPS dynload_warpctc sequence_padding math_function) +op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale math_function) op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index 573bb9c7dfdac2366c2458dd9f27a035a9f9b813..7adb74eab78dcdd0251b8db60781f6e24e348634 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -51,8 +51,8 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Clip Operator. -The clip operator limits the value of given input within an interval. The interval is -specified with arguments 'min' and 'max': +The clip operator limits the value of given input within an interval. The +interval is specified with arguments 'min' and 'max': $$ Out = \min(\max(X, min), max) diff --git a/paddle/operators/detail/grpc_client.cc b/paddle/operators/detail/grpc_client.cc index 5a4db2d7e686ce84abef620f890be8f3aa82cb73..aee56ffe018aa8d0d2106df24bd9358c930a02ca 100644 --- a/paddle/operators/detail/grpc_client.cc +++ b/paddle/operators/detail/grpc_client.cc @@ -87,7 +87,7 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, return true; } -bool RPCClient::wait() { +bool RPCClient::Wait() { bool ok = true; while (true) { @@ -96,7 +96,6 @@ bool RPCClient::wait() { } if (!Proceed()) { - LOG(ERROR) << "Get meets CompletionQueue error"; return false; } } @@ -110,9 +109,9 @@ bool RPCClient::Proceed() { // request counts. if (!cq_.Next(&tag, &ok)) { + LOG(ERROR) << "Get meets CompletionQueue error"; return false; } - req_count_--; GPR_ASSERT(ok); PADDLE_ENFORCE(tag); @@ -120,12 +119,15 @@ bool RPCClient::Proceed() { // TODO(gongwb): add more retries. 
ClientBase* c = static_cast(tag); if (!c->status_.ok()) { + LOG(ERROR) << "proc param error:" << c->var_h_.String() + << " grpc error:" << c->status_.error_message(); delete c; - return true; + return false; } c->Process(); delete c; + req_count_--; return true; } @@ -135,8 +137,12 @@ std::shared_ptr RPCClient::GetChannel(const std::string& ep) { return it->second; } + grpc::ChannelArguments args; + args.SetMaxSendMessageSize(std::numeric_limits::max()); + args.SetMaxReceiveMessageSize(std::numeric_limits::max()); + auto ch = std::shared_ptr( - grpc::CreateChannel(ep, grpc::InsecureChannelCredentials())); + grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args)); channels_[ep] = ch; return ch; diff --git a/paddle/operators/detail/grpc_client.h b/paddle/operators/detail/grpc_client.h index d27b5ced9ece67f9b9da3b7f87ec231477603580..a62e70a2533ae52d84d010504b19fed5aeb15dc0 100644 --- a/paddle/operators/detail/grpc_client.h +++ b/paddle/operators/detail/grpc_client.h @@ -130,7 +130,7 @@ class RPCClient { const framework::Scope& scope, const std::string& var_name, int64_t time_out = 600 * 1000); - bool wait(); + bool Wait(); private: bool Proceed(); diff --git a/paddle/operators/detail/grpc_server.cc b/paddle/operators/detail/grpc_server.cc index e8d561a57ff59e9221400241f881cb26fb6c6f06..c0b94746a0b7f6ffb657bbf5af18360426933858 100644 --- a/paddle/operators/detail/grpc_server.cc +++ b/paddle/operators/detail/grpc_server.cc @@ -28,12 +28,15 @@ class RequestBase { public: explicit RequestBase(sendrecv::SendRecvService::AsyncService* service, grpc::ServerCompletionQueue* cq) - : service_(service), cq_(cq), status_(PROCESS) {} + : service_(service), cq_(cq), status_(PROCESS) { + PADDLE_ENFORCE(cq_); + } virtual ~RequestBase() {} virtual void Process() { assert(false); } CallStatus Status() { return status_; } void SetStatus(CallStatus status) { status_ = status; } + virtual std::string GetReqName() { assert(false); } protected: grpc::ServerContext ctx_; @@ -56,12 +59,14 @@ class RequestSend final : public RequestBase { virtual ~RequestSend() {} + virtual std::string GetReqName() { return request_.varname(); } + virtual void Process() { MessageWithName msg_with_name = std::make_pair(request_.varname(), std::move(request_)); queue_->Push(std::move(msg_with_name)); - // TODO(gongwb): check var's info. responder_.Finish(reply_, grpc::Status::OK, this); + status_ = FINISH; } protected: @@ -74,20 +79,27 @@ class RequestSend final : public RequestBase { class RequestGet final : public RequestBase { public: explicit RequestGet(sendrecv::SendRecvService::AsyncService* service, - grpc::ServerCompletionQueue* cq, framework::Scope* scope) - : RequestBase(service, cq), responder_(&ctx_), scope_(scope) { + grpc::ServerCompletionQueue* cq, framework::Scope* scope, + const platform::DeviceContext* dev_ctx) + : RequestBase(service, cq), + responder_(&ctx_), + scope_(scope), + dev_ctx_(dev_ctx) { service_->RequestGetVariable(&ctx_, &request_, &responder_, cq_, cq_, this); } virtual ~RequestGet() {} + virtual std::string GetReqName() { return request_.varname(); } + virtual void Process() { // proc request. std::string var_name = request_.varname(); auto* var = scope_->FindVar(var_name); - SerializeToMessage(var_name, var, platform::CPUDeviceContext(), &reply_); + SerializeToMessage(var_name, var, *dev_ctx_, &reply_); // TODO(gongwb): check var's info. 
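The switch to grpc::CreateCustomChannel above lifts gRPC's default message size caps (incoming messages are limited to roughly 4 MB by default), which serialized tensors can easily exceed; the server-side builder below applies the same limits. A standalone sketch of the client-side pattern, with a placeholder endpoint:

#include <limits>
#include <grpc++/grpc++.h>  // <grpcpp/grpcpp.h> on newer gRPC releases

std::shared_ptr<grpc::Channel> MakeUnboundedChannel(const std::string& ep) {
  grpc::ChannelArguments args;
  args.SetMaxSendMessageSize(std::numeric_limits<int>::max());
  args.SetMaxReceiveMessageSize(std::numeric_limits<int>::max());
  return grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args);
}

// Usage: auto ch = MakeUnboundedChannel("127.0.0.1:6174");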
responder_.Finish(reply_, grpc::Status::OK, this); + status_ = FINISH; } protected: @@ -95,11 +107,14 @@ class RequestGet final : public RequestBase { sendrecv::VariableMessage reply_; ServerAsyncResponseWriter responder_; framework::Scope* scope_; + const platform::DeviceContext* dev_ctx_; }; void AsyncGRPCServer::RunSyncUpdate() { grpc::ServerBuilder builder; builder.AddListeningPort(address_, grpc::InsecureServerCredentials()); + builder.SetMaxSendMessageSize(std::numeric_limits::max()); + builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); builder.RegisterService(&service_); cq_send_ = builder.AddCompletionQueue(); @@ -155,22 +170,10 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { if (is_shut_down_) { return; } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_); + RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_); VLOG(4) << "create Requestget status:" << get->Status(); } -void AsyncGRPCServer::SetFinishOrDelete(RequestBase*& last) { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - delete last; - last = NULL; - return; - } - - last->SetStatus(FINISH); - return; -} - void AsyncGRPCServer::HandleRequest(bool wait, grpc::ServerCompletionQueue* cq, std::string cq_name, std::function TryToRegisterNewOne) { @@ -184,13 +187,19 @@ void AsyncGRPCServer::HandleRequest(bool wait, grpc::ServerCompletionQueue* cq, break; } + PADDLE_ENFORCE(tag); if (wait && !done_) { Wait(); } RequestBase* base = (RequestBase*)tag; + // reference: + // https://github.com/tensorflow/tensorflow/issues/5596 + // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM + // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I if (!ok) { - VLOG(4) << cq_name << " recv no regular event"; + LOG(WARNING) << cq_name << " recv no regular event:argument name" + << base->GetReqName(); TryToRegisterNewOne(); delete base; continue; @@ -201,7 +210,6 @@ void AsyncGRPCServer::HandleRequest(bool wait, grpc::ServerCompletionQueue* cq, VLOG(4) << cq_name << " status:" << base->Status(); TryToRegisterNewOne(); base->Process(); - SetFinishOrDelete(base); break; } case FINISH: { diff --git a/paddle/operators/detail/grpc_server.h b/paddle/operators/detail/grpc_server.h index 041fe05b2e9c37e8a91669b8f523c47b56e14cba..2c078b77771656dc7fc0342ecf21b8d33dc11817 100644 --- a/paddle/operators/detail/grpc_server.h +++ b/paddle/operators/detail/grpc_server.h @@ -37,7 +37,7 @@ class RequestBase; class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { public: - explicit AsyncGRPCServer(std::string address) { address_ = address; } + explicit AsyncGRPCServer(const std::string &address) : address_(address) {} void RunSyncUpdate(); @@ -47,6 +47,8 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { void SetScope(framework::Scope *scope) { scope_ = scope; } + void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } + const MessageWithName Get() { return this->var_recv_queue_.Pop(); } void Push(const MessageWithName &msg) { this->var_recv_queue_.Push(msg); } @@ -60,7 +62,6 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { std::function TryToRegisterNewOne); void TryToRegisterNewSendOne(); void TryToRegisterNewGetOne(); - void SetFinishOrDelete(RequestBase *&last); void ShutdownQueue(); private: @@ -74,6 +75,7 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { std::string address_; framework::Scope *scope_; + const platform::DeviceContext *dev_ctx_; // received 
variable from RPC, operators fetch variable from this queue. SimpleBlockQueue var_recv_queue_; diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h index 59abbb57d1dcfbef6ead70e4afa9a3816d60d9b5..a8389429f26c17ceab1db22175c90888546ead6f 100644 --- a/paddle/operators/elementwise_add_op.h +++ b/paddle/operators/elementwise_add_op.h @@ -28,39 +28,7 @@ template class ElementwiseAddKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; - - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); - z->mutable_data(ctx.GetPlace()); - TransformFunctor, T, DeviceContext> functor( - x, y, z, ctx.template device_context(), AddFunctor()); - - auto x_dims = x->dims(); - auto y_dims = y->dims(); - PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), - "Rank of first input must >= rank of second input."); - - if (x_dims == y_dims) { - functor.Run(); - return; - } - - int axis = ctx.Attr("axis"); - axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); - PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), - "Axis should be in range [0, x_dims)"); - - int pre, n, post; - get_mid_dims(x_dims, y_dims, axis, pre, n, post); - if (post == 1) { - functor.RunRowWise(n, pre); - return; - } else { - functor.RunMidWise(n, pre, post); - return; - } + ElementwiseComputeEx, DeviceContext, T>(ctx); } }; @@ -81,23 +49,6 @@ struct ElementwiseAddGradFunctor { } }; -template -struct ElementwiseAddOneGradFunctor { - template - void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) { - auto dz_e = framework::EigenVector::Flatten(*dz); - if (dx) { - auto dx_e = framework::EigenVector::Flatten(*dx); - dx_e.device(d) = dz_e; - } - if (dy) { - auto dy_e = framework::EigenVector::Flatten(*dy); - dy_e.device(d) = dz_e.sum(); - } - } -}; - template struct ElementwiseAddBroadCastGradFunctor { template { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, - ElementwiseAddOneGradFunctor, ElementwiseAddBroadCastGradFunctor, ElementwiseAddBroadCast2GradFunctor>(ctx); } diff --git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h index 875abd313ffc8fdf910d461922ff41f65ef276e7..ef26cb6c914f50ded07cc9d0d8de3f49f2151129 100644 --- a/paddle/operators/elementwise_div_op.h +++ b/paddle/operators/elementwise_div_op.h @@ -19,11 +19,16 @@ limitations under the License. */ namespace paddle { namespace operators { +template +struct DivFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a / b; } +}; + template class ElementwiseDivKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseComputeEx, DeviceContext, T>(ctx); } }; @@ -107,7 +112,6 @@ class ElementwiseDivGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, - ElementwiseDivGradFunctor, ElementwiseDivBroadCastGradFunctor, ElementwiseDivBroadCast2GradFunctor>(ctx); } diff --git a/paddle/operators/elementwise_max_op.cc b/paddle/operators/elementwise_max_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..53c27ae5be4cbfe85ce61aa27196594ae152eea4 --- /dev/null +++ b/paddle/operators/elementwise_max_op.cc @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
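After the refactor above, the forward pass of an elementwise op reduces to ElementwiseComputeEx plus a one-line functor (AddFunctor, DivFunctor, and the MaxFunctor introduced below all follow the same shape), and the per-element gradient functor variants are dropped. A rough sketch of what a new binary op would supply, using a hypothetical AbsDiffFunctor that is not part of this patch:

template <typename T>
struct AbsDiffFunctor {  // hypothetical functor, for illustration only
  inline HOSTDEVICE T operator()(T a, T b) const { return a > b ? a - b : b - a; }
};

template <typename DeviceContext, typename T>
class ElementwiseAbsDiffKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Broadcasting along `axis` is handled inside ElementwiseComputeEx.
    ElementwiseComputeEx<AbsDiffFunctor<T>, DeviceContext, T>(ctx);
  }
};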
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/elementwise_max_op.h" +#include "paddle/operators/elementwise_op.h" + +namespace paddle { +namespace operators { +class ElementwiseMaxOpMaker : public ElementwiseOpMaker { + public: + ElementwiseMaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : ElementwiseOpMaker(proto, op_checker) { + SetComment("Max", "Out = max(X, Y)"); + AddComment(comment_); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(elementwise_max, ops::ElementwiseOp, ops::ElementwiseMaxOpMaker, + elementwise_max_grad, ops::ElementwiseOpGrad); +REGISTER_OP_CPU_KERNEL( + elementwise_max, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel); +REGISTER_OP_CPU_KERNEL( + elementwise_max_grad, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel); diff --git a/paddle/operators/elementwise_max_op.cu b/paddle/operators/elementwise_max_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ff4af17477cbd35b765cc00d46c95fda620e2df --- /dev/null +++ b/paddle/operators/elementwise_max_op.cu @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/elementwise_max_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL( + elementwise_max, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel, + ops::ElementwiseMaxKernel); +REGISTER_OP_CUDA_KERNEL( + elementwise_max_grad, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel, + ops::ElementwiseMaxGradKernel); diff --git a/paddle/operators/elementwise_max_op.h b/paddle/operators/elementwise_max_op.h new file mode 100644 index 0000000000000000000000000000000000000000..255728e8e620665a7de225b228c19d6c510da1c8 --- /dev/null +++ b/paddle/operators/elementwise_max_op.h @@ -0,0 +1,120 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/operators/elementwise_op_function.h" + +namespace paddle { +namespace operators { + +template +struct MaxFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a > b ? a : b; } +}; + +template +class ElementwiseMaxKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + ElementwiseComputeEx, DeviceContext, T>(ctx); + } +}; + +template +struct ElementwiseMaxGradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e > y_e).template cast() * dz_e; + } + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = (x_e <= y_e).template cast() * dz_e; + } + } +}; + +template +struct ElementwiseMaxBroadCastGradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e > y_e_bcast).template cast() * dz_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = ((x_e <= y_e_bcast).template cast() * dz_e) + .reshape(Eigen::DSizes(pre, n)) + .sum(Eigen::array{{0}}); + } + } +}; + +template +struct ElementwiseMaxBroadCast2GradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n, + Post post) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e > y_e_bcast).template cast() * dz_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = ((x_e <= y_e_bcast).template cast() * dz_e) + .reshape(Eigen::DSizes(pre, n, post)) + .sum(Eigen::array{{0, 2}}); + } + } +}; + +template +class ElementwiseMaxGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + ElementwiseGradCompute, + ElementwiseMaxBroadCastGradFunctor, + ElementwiseMaxBroadCast2GradFunctor>(ctx); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/elementwise_min_op.cc b/paddle/operators/elementwise_min_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..99482e1bf60c88062087c5fe0105e90aa0a8677c --- /dev/null +++ b/paddle/operators/elementwise_min_op.cc @@ -0,0 +1,45 @@ +/* Copyright (c) 
2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/elementwise_min_op.h" +#include "paddle/operators/elementwise_op.h" + +namespace paddle { +namespace operators { +class ElementwiseMinOpMaker : public ElementwiseOpMaker { + public: + ElementwiseMinOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : ElementwiseOpMaker(proto, op_checker) { + SetComment("Min", "Out = min(X, Y)"); + AddComment(comment_); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(elementwise_min, ops::ElementwiseOp, ops::ElementwiseMinOpMaker, + elementwise_min_grad, ops::ElementwiseOpGrad); +REGISTER_OP_CPU_KERNEL( + elementwise_min, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel); +REGISTER_OP_CPU_KERNEL( + elementwise_min_grad, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel); diff --git a/paddle/operators/elementwise_min_op.cu b/paddle/operators/elementwise_min_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..3547e6ccb77177002b1ecbee4e4604b602f72209 --- /dev/null +++ b/paddle/operators/elementwise_min_op.cu @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/elementwise_min_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL( + elementwise_min, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel, + ops::ElementwiseMinKernel); +REGISTER_OP_CUDA_KERNEL( + elementwise_min_grad, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel, + ops::ElementwiseMinGradKernel); diff --git a/paddle/operators/elementwise_min_op.h b/paddle/operators/elementwise_min_op.h new file mode 100644 index 0000000000000000000000000000000000000000..e6627a0f1bb468c8e4661b83489cb964b72dddb0 --- /dev/null +++ b/paddle/operators/elementwise_min_op.h @@ -0,0 +1,120 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/operators/elementwise_op_function.h" + +namespace paddle { +namespace operators { + +template +struct MinFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a < b ? a : b; } +}; + +template +class ElementwiseMinKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + ElementwiseComputeEx, DeviceContext, T>(ctx); + } +}; + +template +struct ElementwiseMinGradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e < y_e).template cast() * dz_e; + } + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = (x_e >= y_e).template cast() * dz_e; + } + } +}; + +template +struct ElementwiseMinBroadCastGradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e < y_e_bcast).template cast() * dz_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = ((x_e >= y_e_bcast).template cast() * dz_e) + .reshape(Eigen::DSizes(pre, n)) + .sum(Eigen::array{{0}}); + } + } +}; + +template +struct ElementwiseMinBroadCast2GradFunctor { + template + void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n, + Post post) { + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dz_e = framework::EigenVector::Flatten(*dz); + + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(d) = (x_e < y_e_bcast).template cast() * dz_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(d) = ((x_e >= y_e_bcast).template cast() * dz_e) + .reshape(Eigen::DSizes(pre, n, post)) + .sum(Eigen::array{{0, 2}}); + } + } +}; + +template +class ElementwiseMinGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + ElementwiseGradCompute, + ElementwiseMinBroadCastGradFunctor, + ElementwiseMinBroadCast2GradFunctor>(ctx); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index 3ee50207c07fa2b7ccf2c002903a4f055dbfb352..4b86b00b5a095ae898f9ce0c17cde2cc91060ba9 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -18,11 +18,16 @@ 
limitations under the License. */ namespace paddle { namespace operators { +template +struct MulFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a * b; } +}; + template class ElementwiseMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseComputeEx, DeviceContext, T>(ctx); } }; @@ -106,7 +111,6 @@ class ElementwiseMulGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, - ElementwiseMulGradFunctor, ElementwiseMulBroadCastGradFunctor, ElementwiseMulBroadCast2GradFunctor>(ctx); } diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index a342595b546bfca1a344cf8a549597df6a29adec..1a0131d8b943da3deebd0c461f78cb02b34e6dc2 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -26,9 +26,9 @@ class ElementwiseOp : public framework::OperatorWithKernel { using Tensor = framework::Tensor; void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of elementwise op should not be null"); + "Input(X) of elementwise op should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), - "Input(Y) of elementwise op should not be null"); + "Input(Y) of elementwise op should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of elementwise op should not be null."); @@ -45,12 +45,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { public: ElementwiseOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) The first input tensor of elementwise op"); - AddInput("Y", "(Tensor) The second input tensor of elementwise op"); - AddOutput("Out", "The output of elementwise op"); + AddInput("X", "(Tensor), The first input tensor of elementwise op."); + AddInput("Y", "(Tensor), The second input tensor of elementwise op."); + AddOutput("Out", "The output of elementwise op."); AddAttr("axis", - "(int, default -1) The starting dimension index " - "for broadcasting Y onto X") + "(int, default -1). The start dimension index " + "for broadcasting Y onto X.") .SetDefault(-1) .EqualGreaterThan(-1); comment_ = R"DOC( @@ -58,19 +58,18 @@ Limited Elementwise {name} Operator. The equation is: -.. math:: - {equation} +$${equation}$$ -X is a tensor of any dimension and the dimensions of tensor Y must be smaller than -or equal to the dimensions of X. +$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be +smaller than or equal to the dimensions of $X$. There are two cases for this operator: -1. The shape of Y is same with X; -2. The shape of Y is a subset of X. +1. The shape of $Y$ is same with $X$; +2. The shape of $Y$ is a subset of $X$. For case 2: -Y will be broadcasted to match the shape of X and axis should be -the starting dimension index for broadcasting Y onto X. +$Y$ will be broadcasted to match the shape of $X$ and axis should be +set to index of the start dimension to broadcast $Y$ onto $X$. For example .. code-block:: python @@ -81,7 +80,8 @@ For example shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 -Either of the inputs X and Y or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input X. 
+Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) +information. However, the output only shares the LoD information with input $X$. )DOC"; AddComment(comment_); diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 560247cb108dce5432bfe66556b9e675a3accc27..db5d30c1af286913f8decd7ab74058fd732ead65 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -311,8 +311,7 @@ EIGEN_FUNCTOR(Mul, EIGEN_MUL); EIGEN_FUNCTOR(Div, EIGEN_DIV); template + typename broadcastfunctor, typename broadcast2functor> void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { using Tensor = framework::Tensor; @@ -341,6 +340,13 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { return; } + if (y_dims.size() == 1 && y_dims[0] == 1) { + // y is a scalar + auto extended_dims = framework::vectorize(x_dims); + extended_dims.push_back(1); + x_dims = framework::make_ddim(extended_dims); + } + int axis = ctx.Attr("axis"); axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); @@ -357,5 +363,50 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { return; } } + +template +void ElementwiseComputeEx(const framework::ExecutionContext& ctx) { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); + z->mutable_data(ctx.GetPlace()); + TransformFunctor functor( + x, y, z, ctx.template device_context(), Functor()); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input."); + + if (x_dims == y_dims) { + functor.Run(); + return; + } + + if (y_dims.size() == 1 && y_dims[0] == 1) { + // y is a scalar + auto extended_dims = framework::vectorize(x_dims); + extended_dims.push_back(1); + x_dims = framework::make_ddim(extended_dims); + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); + PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), + "Axis should be in range [0, x_dims)"); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + if (post == 1) { + functor.RunRowWise(n, pre); + return; + } else { + functor.RunMidWise(n, pre, post); + return; + } +} + } // namespace operators } // namespace paddle diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h index 66edf8672d13086f883f0a2ad7ef5802317cc79a..a2aca793026189ec87e00b52d7c351689f870400 100644 --- a/paddle/operators/elementwise_sub_op.h +++ b/paddle/operators/elementwise_sub_op.h @@ -18,11 +18,16 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +template +struct SubFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a - b; } +}; + template class ElementwiseSubKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseComputeEx, DeviceContext, T>(ctx); } }; @@ -43,23 +48,6 @@ struct ElementwiseSubGradFunctor { } }; -template -struct ElementwiseSubOneGradFunctor { - template - void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) { - auto dz_e = framework::EigenVector::Flatten(*dz); - if (dx) { - auto dx_e = framework::EigenVector::Flatten(*dx); - dx_e.device(d) = dz_e; - } - if (dy) { - auto dy_e = framework::EigenVector::Flatten(*dy); - dy_e.device(d) = (-1.0) * dz_e.sum(); - } - } -}; - template struct ElementwiseSubBroadCastGradFunctor { template { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, - ElementwiseSubOneGradFunctor, ElementwiseSubBroadCastGradFunctor, ElementwiseSubBroadCast2GradFunctor>(ctx); } diff --git a/paddle/operators/expand_op.cc b/paddle/operators/expand_op.cc index 08fa91ed72aa41ed2f513c090b9085410bb5cc47..043c93654d33f7c105c89960e18ec72d3557237d 100644 --- a/paddle/operators/expand_op.cc +++ b/paddle/operators/expand_op.cc @@ -58,21 +58,21 @@ class ExpandOpMaker : public framework::OpProtoAndCheckerMaker { ExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "(Tensor, default Tensor) A tensor with rank in [1, 6]." - "X is the input tensor to be expanded."); + "(Tensor, default Tensor). A tensor with rank in [1, 6]." + "X is the input to be expanded."); AddOutput("Out", - "(Tensor, default Tensor) A tensor with rank in [1, 6]." - "The rank of Output(Out) is same as Input(X) except that each " - "dimension size of Output(Out) is equal to corresponding " - "dimension size of Input(X) multiplying corresponding value of " - "Attr(expand_times)."); + "(Tensor, default Tensor). A tensor with rank in [1, 6]." + "The rank of Output(Out) have the same with Input(X). " + "After expanding, size of each dimension of Output(Out) is equal " + "to size of the corresponding dimension of Input(X) multiplying " + "the corresponding value given by Attr(expand_times)."); AddAttr>("expand_times", "Expand times number for each dimension."); AddComment(R"DOC( Expand operator tiles the input by given times number. You should set times number for each dimension by providing attribute 'expand_times'. The rank of X -should be in [1, 6]. Please notice that size of 'expand_times' must be same with -X's rank. Following is a using case: +should be in [1, 6]. Please note that size of 'expand_times' must be the same +with X's rank. 
Following is a using case: Input(X) is a 3-D tensor with shape [2, 3, 1]: diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index fd59eef7d650b48feae68c89be54ec4e48cbcc7e..c607704efac86982c8c22e462381aaab488a9b69 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -13,6 +13,7 @@ if(WITH_GPU) nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context math_function) nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context tensor) nv_library(sequence_padding SRCS sequence_padding.cc sequence_padding.cu DEPS lod_tensor device_context) + nv_library(sequence_scale SRCS sequence_scale.cc sequence_scale.cu DEPS lod_tensor device_context) nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions) nv_library(maxouting SRCS maxouting.cc maxouting.cu DEPS device_context) nv_library(unpooling SRCS unpooling.cc unpooling.cu DEPS device_context) @@ -29,6 +30,7 @@ else() cc_library(context_project SRCS context_project.cc DEPS device_context math_function) cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context tensor) cc_library(sequence_padding SRCS sequence_padding.cc DEPS lod_tensor device_context) + cc_library(sequence_scale SRCS sequence_scale.cc DEPS lod_tensor device_context) cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions) cc_library(maxouting SRCS maxouting.cc DEPS device_context) cc_library(unpooling SRCS unpooling.cc DEPS device_context) diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index 7c6f098ca9065ded1644420a3ab47911bf7bc3b3..c9f322b92e5476d889b57bcc91a0ce1d9e5339d5 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "paddle/operators/math/math_function.h" #include "gtest/gtest.h" diff --git a/paddle/operators/math/math_function_test.cu b/paddle/operators/math/math_function_test.cu index d1139ac988c0077fd3e107c6ffee0fd84c5b7041..6f16d6679248a88425e1de487988e50ee8a469bf 100644 --- a/paddle/operators/math/math_function_test.cu +++ b/paddle/operators/math/math_function_test.cu @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. #include "gtest/gtest.h" #include "paddle/operators/math/math_function.h" diff --git a/paddle/operators/math/sequence_scale.cc b/paddle/operators/math/sequence_scale.cc new file mode 100644 index 0000000000000000000000000000000000000000..7e439e9a2cebaa5d494b185fd878e293a6895e45 --- /dev/null +++ b/paddle/operators/math/sequence_scale.cc @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/sequence_scale.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class ScaleLoDTensorFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + framework::LoDTensor& seq, const T* scales) { + const size_t level = 0; + auto lod = seq.lod(); + const size_t num_seq = lod[level].size() - 1; + size_t seq_width = seq.dims()[1]; + framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); + + T* seq_data = seq.mutable_data(context.GetPlace()); + for (size_t i = 0; i < num_seq; ++i) { + for (size_t j = lod[level][i] * seq_width; + j < lod[level][i + 1] * seq_width; ++j) { + seq_data[j] *= scales[i]; + } + } + } +}; + +template class ScaleLoDTensorFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence_scale.cu b/paddle/operators/math/sequence_scale.cu new file mode 100644 index 0000000000000000000000000000000000000000..ceaabd8e0fd81c927fbd4333c0aa7954b8da8513 --- /dev/null +++ b/paddle/operators/math/sequence_scale.cu @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/math/sequence_scale.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +using platform::PADDLE_CUDA_NUM_THREADS; + +template +__global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, + const size_t seq_width) { + for (int i = threadIdx.x; + i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width; + i += BlockSize) { + int idx = lod[blockIdx.x] * seq_width + i; + seq[idx] *= scales[blockIdx.x]; + } +} + +template +class ScaleLoDTensorFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + framework::LoDTensor& seq, const T* scales) { + const size_t level = 0; + auto lod = seq.lod(); + const size_t num_seq = lod[level].size() - 1; + const size_t seq_width = seq.numel() / seq.dims()[0]; + framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); + T* seq_data = seq.mutable_data(context.GetPlace()); + + SequenceScaleKernel<<< + num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( + seq_data, abs_offset_lod[level].data(), scales, seq_width); + } +}; + +template class ScaleLoDTensorFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence_scale.h b/paddle/operators/math/sequence_scale.h new file mode 100644 index 0000000000000000000000000000000000000000..ecd9a57c3f4d8d91bfb8933a0fd38355c227744d --- /dev/null +++ b/paddle/operators/math/sequence_scale.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/lod_tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +/* + * \brief Scale a sequence. + * + * All sequences will be padded to the same length and stored in a transposed + * shape. + * Example: + * Given: + * seq = (s0, s0, s0, s0; s1, s1; s2, s2, s2; s3) + * scales = (2, 3, 4, 5) + * then: + * result = (2*s0, 2*s0, 2*s0, 2*s0; 3*s1, 3*s1; 4*s2, 4*s2, 4*s2; 5*s3) + + * + * \param context Device context of this functor. + * \param seq LoDTensor which is stored in sequence format, the shape + * is [total_sequence_length, sequence_width] where + * total_sequence_length is the sum of all sequences' + * length. + * \param scales Array. The i-th sequence will be scaled by scales[i]. + * \param num_seq Number of sequence + * + */ +template +class ScaleLoDTensorFunctor { + public: + void operator()(const DeviceContext& context, framework::LoDTensor& seq, + const T* scales); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc index 03302f5cbf5674dca1d22a84137579090b4d5eac..f12074a5f2d08f14b58619fc86b6aa37e4cad132 100644 --- a/paddle/operators/net_op.cc +++ b/paddle/operators/net_op.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index dfd86546e83a6276aedd198eaeb6fad2c50944df..9358f29f62fc21801f8036400d2baebdfd663a3a 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "paddle/operators/net_op.h" #include diff --git a/paddle/operators/op_documentation/batch_norm_op.md b/paddle/operators/op_documentation/batch_norm_op.md index 80948adf2b9047a9685dbdd90b2296b5a955f9c1..d1392619c42d9206bf4bddcd33ad11b033e6cbdb 100644 --- a/paddle/operators/op_documentation/batch_norm_op.md +++ b/paddle/operators/op_documentation/batch_norm_op.md @@ -66,7 +66,7 @@ As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attribu The following graph showes the training computational process of `batch_norm_op`: - + cudnn provides APIs to finish the whole series of computation, we can use them in our GPU kernel. @@ -124,7 +124,7 @@ for pass_id in range(PASS_NUM): `is_infer` is an attribute. Once an operator is created, its attributes can not be changed. It suggests us that we shall maintain two `batch_norm_op` in the model, one's `is_infer` is `True`(we call it `infer_batch_norm_op`) and the other one's is `False`(we call it `train_batch_norm_op`). They share all parameters and variables, but be placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it will fork into two branches, one go through `train_batch_norm_op` and the other one go through `infer_batch_norm_op`:
- +
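A minimal sketch of this two-branch idea in pseudo Python (the `batch_norm` helper and `is_infer` argument here are illustrative only, not the exact graph-building API): both ops reuse the same parameter and statistics variables, and everything downstream of the fork is duplicated per branch.

    y_train = batch_norm(x, scale, bias, mean, var, is_infer=False)  # train_batch_norm_op
    y_infer = batch_norm(x, scale, bias, mean, var, is_infer=True)   # infer_batch_norm_op
    loss = downstream_net(y_train)   # training branch
    pred = downstream_net(y_infer)   # inference branch, shares all parameters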
Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index e1bec0421e76143bef669a4f6fa373cdf01226b2..c2561fa2bf3aa0992f32ed1295c6640d55e6322b 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -30,16 +30,13 @@ static constexpr char kParallelScopes[] = "parallel_scopes"; static constexpr char kParallelBlock[] = "sub_block"; -// using ParallelScopeVar = std::vector; using LoDTensor = framework::LoDTensor; -using OperatorBase = framework::OperatorBase; -void SplitTensorAndMoveTensorToScopes( - const framework::Scope &scope, - const std::vector &sub_scopes, +static void SplitTensorAndMoveTensorToScopes( + const framework::Scope &scope, std::vector *sub_scopes, const std::vector &places, const std::vector &names) { - PADDLE_ENFORCE_EQ(sub_scopes.size(), places.size()); + size_t num_sub_scopes = 0; for (auto &argu : names) { auto *var = scope.FindVar(argu); const auto &tensor = var->Get(); @@ -48,9 +45,21 @@ void SplitTensorAndMoveTensorToScopes( for (auto &lod : lod_tensors) { VLOG(3) << lod.dims(); } + if (num_sub_scopes == 0) { + num_sub_scopes = lod_tensors.size(); + } else { + PADDLE_ENFORCE_EQ(num_sub_scopes, lod_tensors.size()); + } + PADDLE_ENFORCE_NE(num_sub_scopes, 0); + if (sub_scopes->size() == 0) { + sub_scopes->reserve(num_sub_scopes); + for (size_t i = 0; i < num_sub_scopes; ++i) { + sub_scopes->emplace_back(&scope.NewScope()); + } + } - for (size_t i = 0; i < sub_scopes.size(); ++i) { - *sub_scopes[i]->Var(argu)->GetMutable() = lod_tensors[i]; + for (size_t i = 0; i < lod_tensors.size(); ++i) { + *(*sub_scopes)[i]->Var(argu)->GetMutable() = lod_tensors[i]; } } } @@ -70,7 +79,7 @@ class ParallelDoOp : public framework::OperatorBase { const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) {} + : framework::OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, const platform::Place &place) const override { @@ -85,19 +94,17 @@ class ParallelDoOp : public framework::OperatorBase { auto &sub_scopes = *scope.FindVar(Output(kParallelScopes)) ->GetMutable>(); - for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) { - sub_scopes.push_back(&scope.NewScope()); - } // split input - SplitTensorAndMoveTensorToScopes(scope, sub_scopes, places, + SplitTensorAndMoveTensorToScopes(scope, &sub_scopes, places, Inputs(kInputs)); + // copy parameter for (auto ¶m : Inputs(kParameters)) { PADDLE_ENFORCE(scope.FindVar(param)->IsType(), "Only support parameter type as LoDTensor"); auto &src = scope.FindVar(param)->Get(); - for (size_t i = 0; i < places.size(); ++i) { + for (size_t i = 0; i < sub_scopes.size(); ++i) { auto &place = places[i]; auto *sub_scope = sub_scopes[i]; auto *dst = sub_scope->Var(param)->GetMutable(); @@ -108,9 +115,7 @@ class ParallelDoOp : public framework::OperatorBase { std::vector> workers; workers.reserve(places.size()); - for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) { - VLOG(3) << "Run " << place_idx; - + for (size_t place_idx = 0; place_idx < sub_scopes.size(); ++place_idx) { auto &place = places[place_idx]; auto *cur_scope = sub_scopes[place_idx]; @@ -157,21 +162,16 @@ ParallelDo Operator. 
} }; -class ParallelDoGradOp : public OperatorBase { +class ParallelDoGradOp : public framework::OperatorBase { public: ParallelDoGradOp(const std::string &type, const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) {} + : framework::OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, const platform::Place &place) const override { - // // get device context from pool - // platform::DeviceContextPool &pool = - // platform::DeviceContextPool::Instance(); - // auto &dev_ctx = *pool.Get(place); - auto *block = Attr(kParallelBlock); auto *program = block->Program(); @@ -181,26 +181,16 @@ class ParallelDoGradOp : public OperatorBase { auto &places = scope.FindVar(Input(kPlaces))->Get(); // feed output@grad - SplitTensorAndMoveTensorToScopes(scope, sub_scopes, places, - Inputs(framework::GradVarName(kOutputs))); + SplitTensorAndMoveTensorToScopes( + scope, const_cast *>(&sub_scopes), + places, Inputs(framework::GradVarName(kOutputs))); WaitOnPlaces(places); - // for debugging - for (auto &s : Inputs(framework::GradVarName(kOutputs))) { - VLOG(3) << s; - VLOG(3) << scope.FindVar(s)->Get(); - for (auto *sub_scope : sub_scopes) { - VLOG(3) << sub_scope->FindVar(s)->Get(); - } - } - // exe run std::vector> workers; - for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) { - VLOG(3) << "Run " << place_idx; - - auto &place = places[place_idx]; - auto *cur_scope = sub_scopes[place_idx]; + for (size_t i = 0; i < sub_scopes.size(); ++i) { + auto &place = places[i]; + auto *cur_scope = sub_scopes[i]; // execute workers.emplace_back(framework::Async([program, cur_scope, place, block] { @@ -216,33 +206,38 @@ class ParallelDoGradOp : public OperatorBase { // merge grad for (auto &s : Outputs(framework::GradVarName(kParameters))) { - VLOG(3) << "merge grad " << s; - - auto &t = sub_scopes[0]->FindVar(s)->Get(); - VLOG(3) << t; - - std::string s_buf = s + "@BUF"; - auto *t_buf = sub_scopes[0]->Var(s_buf)->GetMutable(); - - for (size_t place_idx = 1; place_idx < places.size(); ++place_idx) { - auto &tt = sub_scopes[place_idx]->FindVar(s)->Get(); - VLOG(3) << place_idx; - VLOG(3) << tt; - framework::Copy(tt, places[0], t_buf); + auto &result = sub_scopes[0]->FindVar(s)->Get(); + std::string tmp_name; + auto *tmp = sub_scopes[0]->Var(&tmp_name)->GetMutable(); + + for (size_t i = 1; i < sub_scopes.size(); ++i) { + auto &tensor_to_merge = sub_scopes[i]->FindVar(s)->Get(); + if (!(places[i] == places[0])) { + framework::Copy(tensor_to_merge, places[0], tmp); + } else { + tmp->ShareDataWith(tensor_to_merge); + } auto sum_op = framework::OpRegistry::CreateOp( - "sum", {{"X", {s, s_buf}}}, {{"Out", {s}}}, + "sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}}, framework::AttributeMap{}); sum_op->Run(*sub_scopes[0], places[0]); WaitOnPlaces(places); } - VLOG(3) << t; - framework::Copy(t, place, scope.FindVar(s)->GetMutable()); + VLOG(3) << result; + framework::Copy(result, place, scope.FindVar(s)->GetMutable()); } } }; +std::ostream &operator<<(std::ostream &sout, + const std::vector &strs) { + std::copy(strs.begin(), strs.end(), + std::ostream_iterator(sout, ",")); + return sout; +} + class ParallelDoGradOpDescMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; @@ -283,18 +278,30 @@ class ParallelDoGradOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext 
*ctx) const override { std::vector input{kParameters, kInputs}; std::vector output{kOutputs}; - for (auto &s : input) { - PADDLE_ENFORCE(ctx->HasInputs(s)); - PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(s)), - "Cannot find the gradient variable %s", - framework::GradVarName(s)); - } + + PADDLE_ENFORCE(ctx->HasInputs(kParameters)); + PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters))); + PADDLE_ENFORCE(ctx->HasInput(kInputs)); + for (auto &s : output) { PADDLE_ENFORCE(ctx->HasInputs(s)); } - for (auto &s : input) { - ctx->SetOutputsDim(framework::GradVarName(s), ctx->GetInputsDim(s)); + + ctx->SetOutputsDim(framework::GradVarName(kParameters), + ctx->GetInputsDim(kParameters)); + + auto i_dims = ctx->GetInputsDim(kInputs); + auto ig_names = ctx->Outputs(framework::GradVarName(kInputs)); + + for (size_t i = 0; i < ig_names.size(); ++i) { + auto &ig_name = ig_names[i]; + if (ig_name == framework::kEmptyVarName) { + continue; + } + + ctx->SetDims({ig_name}, {i_dims[i]}); } + if (ctx->HasInputs(kParameters)) { PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters))); ctx->SetOutputsDim(framework::GradVarName(kParameters), diff --git a/paddle/operators/print_op.cc b/paddle/operators/print_op.cc index 89e41d806c7661a3e61e0a944a2a980704297dd9..8b233d64c904a8870212af33c5839cfc555b5dc8 100644 --- a/paddle/operators/print_op.cc +++ b/paddle/operators/print_op.cc @@ -16,12 +16,17 @@ #include #include "paddle/framework/op_registry.h" +#include "paddle/framework/variable.h" namespace paddle { namespace operators { #define CLOG std::cout +const std::string kForward = "FORWARD"; +const std::string kBackward = "BACKWARD"; +const std::string kBoth = "BOTH"; + struct Formater { std::string message; std::string name; @@ -122,40 +127,77 @@ class TensorPrintOp : public framework::OperatorBase { TensorPrintOp(const TensorPrintOp& o) : framework::OperatorBase( static_cast(o)) { - PADDLE_THROW("Not implemented"); + PADDLE_THROW("Not implemented."); } void Run(const framework::Scope& scope, const platform::Place& place) const override { - // Only run the `first_n` times. 
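+ // The rewritten Run() below first determines the phase from which input was fed: "In" means the forward pass, "In@GRAD" means the backward pass. + // The input is then shared into "Out" (data and LoD) so the op behaves as an identity, printing happens only when the "print_phase" attribute matches the current phase (or is BOTH), and non-CPU tensors are copied to host before being formatted.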
+ const framework::Variable* in_var_ptr = nullptr; + std::string phase = kForward; + std::string printed_var_name = ""; + + auto& inputs = Inputs(); + if (inputs.find("In") != inputs.end() && !Inputs("In").empty()) { + in_var_ptr = scope.FindVar(Input("In")); + printed_var_name = Inputs("In").front(); + } else if (inputs.find("In@GRAD") != inputs.end() && + !Inputs("In@GRAD").empty()) { + in_var_ptr = scope.FindVar(Input("In@GRAD")); + printed_var_name = Inputs("In@GRAD").front(); + phase = kBackward; + } else { + PADDLE_THROW("Unknown phase, should be forward or backward."); + } + + PADDLE_ENFORCE_NOT_NULL(in_var_ptr); + + auto& in_tensor = in_var_ptr->Get(); + auto* out_var_ptr = scope.FindVar(Output("Out")); + auto& out_tensor = *out_var_ptr->GetMutable(); + + // Just copy data from input tensor to output tensor + // output tensor share same memory with input tensor + out_tensor.ShareDataWith(in_tensor); + out_tensor.set_lod(in_tensor.lod()); + + std::string print_phase = Attr("print_phase"); + if (print_phase != phase && print_phase != kBoth) { + return; + } + int first_n = Attr("first_n"); if (first_n > 0 && ++times_ > first_n) return; - PADDLE_ENFORCE(!Inputs("input").empty(), "input should be set"); - auto* input_var = scope.FindVar(Input("input")); - PADDLE_ENFORCE_NOT_NULL(input_var); - auto& tensor = input_var->Get(); + framework::LoDTensor printed_tensor; + printed_tensor.set_lod(in_tensor.lod()); + printed_tensor.Resize(in_tensor.dims()); - // TODO(ChunweiYan) support GPU - PADDLE_ENFORCE(platform::is_cpu_place(tensor.place())); + if (platform::is_cpu_place(in_tensor.place())) { + printed_tensor.ShareDataWith(in_tensor); + } else { + // copy data to cpu to print + platform::CPUPlace place; + framework::Copy(in_tensor, place, &printed_tensor); + } Formater formater; if (Attr("print_tensor_name")) { - formater.name = Inputs("input").front(); + formater.name = printed_var_name; } if (Attr("print_tensor_type")) { - formater.dtype = tensor.type(); + formater.dtype = printed_tensor.type(); } if (Attr("print_tensor_shape")) { - formater.dims.assign(tensor.dims()[0], - tensor.dims()[tensor.dims().size() - 1]); + auto& dims = printed_tensor.dims(); + formater.dims.resize(dims.size()); + for (int i = 0; i < dims.size(); ++i) formater.dims[i] = dims[i]; } if (Attr("print_tensor_lod")) { - formater.lod = tensor.lod(); + formater.lod = printed_tensor.lod(); } formater.summarize = Attr("summarize"); - formater.data = (void*)tensor.data(); - formater(tensor.numel()); + formater.data = (void*)printed_tensor.data(); + formater(printed_tensor.numel()); } private: @@ -166,27 +208,46 @@ class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker { public: PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("input", "the tensor that will be displayed."); + AddInput("In", "Input tensor to be displayed."); AddAttr("first_n", "Only log `first_n` number of times."); AddAttr("message", "A string message to print as a prefix."); - AddAttr("summarize", "Print this number of elements in the tensor."); + AddAttr("summarize", "Number of elements printed."); AddAttr("print_tensor_name", "Whether to print the tensor name."); AddAttr("print_tensor_type", "Whether to print the tensor's dtype."); AddAttr("print_tensor_shape", "Whether to print the tensor's shape."); AddAttr("print_tensor_lod", "Whether to print the tensor's lod."); + AddAttr( + "print_phase", + "(string, default 'BOTH') Which phase to display including 
'FORWARD' " + "'BACKWARD' and 'BOTH'.") + .SetDefault(kBoth) + .InEnum({kForward, kBackward, kBoth}); + AddOutput("Out", "Output tensor with same data as input tensor."); AddComment(R"DOC( - Creates a print op that will print when a tensor is accessed. +Creates a print op that will print when a tensor is accessed. - Wraps the tensor passed in so that whenever that a tensor is accessed, - the message `message` is printed, along with the current value of the - tensor `t`.)DOC"); +Wraps the tensor passed in so that whenever that a tensor is accessed, +the message `message` is printed, along with the current value of the +tensor `t`.)DOC"); } }; -class InferShape : public framework::InferShapeBase { +class InferShapeForward : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* context) const override { - PADDLE_ENFORCE(context->HasInput("input"), "input should be set"); + PADDLE_ENFORCE(context->HasInput("In"), "Input(In) should not be null."); + context->ShareLoD("In", /*->*/ "Out"); + context->SetOutputDim("Out", context->GetInputDim("In")); + } +}; + +class InferShapeBackward : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("In@GRAD"), + "Input(In@GRAD) should not be null."); + context->ShareLoD("In@GRAD", /*->*/ "Out"); + context->SetOutputDim("Out", context->GetInputDim("In@GRAD")); } }; @@ -196,11 +257,27 @@ class InferVarType : public framework::VarTypeInference { framework::BlockDesc* block) const override {} }; +class PrintOpProtoAndCheckGradOpMaker + : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto* op_desc_ptr = new framework::OpDesc(); + op_desc_ptr->SetType("print_grad"); + op_desc_ptr->SetInput("In@GRAD", OutputGrad("Out")); + op_desc_ptr->SetOutput("Out", InputGrad("In")); + op_desc_ptr->SetAttrMap(Attrs()); + return std::unique_ptr(op_desc_ptr); + } +}; + } // namespace operators } // namespace paddle -REGISTER_OPERATOR(print, paddle::operators::TensorPrintOp, - paddle::operators::PrintOpProtoAndCheckMaker, - paddle::operators::InferShape, - paddle::operators::InferVarType, - paddle::framework::EmptyGradOpMaker); +namespace ops = paddle::operators; + +REGISTER_OPERATOR(print, ops::TensorPrintOp, ops::PrintOpProtoAndCheckMaker, + ops::PrintOpProtoAndCheckGradOpMaker, ops::InferShapeForward, + ops::InferVarType); +REGISTER_OPERATOR(print_grad, ops::TensorPrintOp, ops::InferShapeBackward); diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 55b33343af43802e1b6b95a32603bfee806c9764..f9ed7516826319da422fbb0af4e5c277afa7ae40 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -87,7 +87,12 @@ class RecvOp : public framework::OperatorBase { const platform::Place &dev_place) const override { // FIXME(typhoonzero): no new scopes for every run. framework::Scope &recv_scope = scope.NewScope(); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + + // FIXME(Yancey1989): initialize rpc server with laze mode. 
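+ // The device context is now obtained once from DeviceContextPool above and handed to the RPC service here, so each received variable message can be deserialized with the same context in the receive loop below (see DeserializeFromMessage) instead of re-querying the pool per message.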
rpc_service_->SetScope(&recv_scope); + rpc_service_->SetDevCtx(&dev_ctx); auto param_list = Attr>("ParamList"); auto grad_list = Attr>("GradList"); auto trainer_count = Attr("Trainers"); @@ -96,6 +101,8 @@ class RecvOp : public framework::OperatorBase { rpc_service_->Reset(); // TODO(typhoonzero): change this to a while_op for every cluster-batch. bool exit_flag = false; + VLOG(4) << "param_count:" << param_count + << " trainer_count:" << trainer_count; while (!exit_flag) { // TODO(gognwb): simply this loop. // Get from multiple trainers, we don't care about order in which @@ -134,9 +141,6 @@ class RecvOp : public framework::OperatorBase { } auto *var = recv_scope.Var(grad_var_name); - platform::DeviceContextPool &pool = - platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); detail::DeserializeFromMessage(v.second, dev_ctx, var); } diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 172d28bb3b647901d4de7bc03c9de21e3468a364..09b7091358e65221374a604122b742d763cfbafc 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -129,7 +129,7 @@ If reduce_all is true, just reduce along all dimensions and output a scalar. } void SetComment(std::string name, std::string op) { - Replace(comment_, "{ReduceOP}", name); + Replace(comment_, "{ReduceOp}", name); Replace(comment_, "{reduce}", op); } }; diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 4d145250bdc73607c8817e20fdb753f4c96e2391..7c81a9524d6609a65b3167d95053bf4e85eef0db 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -33,13 +33,13 @@ class SendOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + const platform::Place& place) const override { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); - // FIXME(gongwb): DeviceContext? - auto ctx = platform::CPUDeviceContext(); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto& ctx = *pool.Get(place); for (size_t i = 0; i < ins.size(); i++) { client_.AsyncSendVariable(epmap[i], ctx, scope, ins[i]); } @@ -48,7 +48,7 @@ class SendOp : public framework::OperatorBase { client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } - client_.wait(); + PADDLE_ENFORCE(client_.Wait()); } private: diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index ade94b40bed91c64d3074036c067de34323bdaa7..bf870115a4d7b6f4d578df7707826973d4363ba6 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -138,6 +138,7 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { math::set_constant(dev_ctx, &rest_tensor, 0.0f); } } + dx_tensor.set_lod(x_tensor.lod()); } }; diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index 4dfae043cb1091c9491d89aec4d1415d4741e013..8d55ae5dd7b0e76acb9f21cb10b79cb7aca18a8d 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -60,6 +60,12 @@ class SplitOp : public framework::OperatorWithKernel { } } ctx->SetOutputsDim("Out", outs_dims); + if (axis != 0) { + // Only pass LoD when not splitting along the first dim. 
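+ // LoD offsets index sequence boundaries along dim 0, so a split along that dim would cut across sequences; for any other axis the input LoD can be shared with each output unchanged.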
+ for (size_t i = 0; i < outs_number; ++i) { + ctx->ShareLoD("X", "Out", 0, i); + } + } } }; diff --git a/paddle/operators/warpctc_op.h b/paddle/operators/warpctc_op.h index 41899c7fe0c3089c4fc7c160c8896dec0e3cd6dd..8aea061c00cc9614db37ed408b6d330ef707d1cf 100644 --- a/paddle/operators/warpctc_op.h +++ b/paddle/operators/warpctc_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/sequence_padding.h" +#include "paddle/operators/math/sequence_scale.h" #include "paddle/platform/dynload/warpctc.h" namespace paddle { @@ -178,11 +179,14 @@ class WarpCTCKernel : public framework::OpKernel { T* warpctc_grad_data = warpctc_grad->mutable_data(warpctc_logits.dims(), ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), warpctc_grad, + static_cast(0)); + // warpctc accesses labels in CPU memory Tensor warpctc_label; Copy(*label, platform::CPUPlace(), ctx.device_context(), &warpctc_label); const int* warpctc_label_data = warpctc_label.data(); - // warpctc stores loss in CPU memory Tensor warpctc_loss; T* warpctc_loss_data = @@ -206,11 +210,18 @@ class WarpCTCGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto* warpctc_grad = ctx.Input("WarpCTCGrad"); auto* logits_grad = ctx.Output(framework::GradVarName("Logits")); + const Tensor* loss_grad = ctx.Input(framework::GradVarName("Loss")); + logits_grad->mutable_data(ctx.GetPlace()); bool norm_by_times = ctx.Attr("norm_by_times"); math::UnpaddingLoDTensorFunctor()( ctx.template device_context(), *logits_grad, *warpctc_grad, norm_by_times); + + const T* loss_grad_data = loss_grad->data(); + math::ScaleLoDTensorFunctor()( + ctx.template device_context(), *logits_grad, + loss_grad_data); } }; diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 7a3400919efe6f3bed40e45a245b556beab6fce4..2fdd25dbbe68659f8a0a9da13a87148ed259127a 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -121,8 +121,8 @@ class WhileGradOp : public framework::OperatorBase { for (size_t i = 0; i < outside_og_names.size(); ++i) { auto outside_og_name = outside_og_names[i]; auto inside_og_name = inside_og_names[i]; - VLOG(10) << "Linking outside " << outside_og_name << " --> inside " - << inside_og_name; + VLOG(8) << "Linking outside " << outside_og_name << " --> inside " + << inside_og_name; auto &og_outside = detail::Ref(scope.FindVar(outside_og_name), "Cannot find Outside Gradient %s", outside_og_name); @@ -141,11 +141,11 @@ class WhileGradOp : public framework::OperatorBase { auto &outside_array = og_outside.Get(); auto &inside_array = detail::Ref(og_inside.GetMutable()); - VLOG(10) << outside_og_name << " size = " << outside_array.size(); + VLOG(8) << outside_og_name << " size = " << outside_array.size(); inside_array.resize(outside_array.size()); for (size_t j = 0; j < inside_array.size(); ++j) { - VLOG(10) << j << " " << outside_array[j].numel(); + VLOG(8) << j << " " << outside_array[j].numel(); if (outside_array[j].numel() != 0) { inside_array[j].set_lod(outside_array[j].lod()); inside_array[j].ShareDataWith(outside_array[j]); @@ -187,10 +187,14 @@ class WhileGradOp : public framework::OperatorBase { attrs["shape"] = framework::vectorize2int(inside_tensor.dims()); attrs["value"] = 0.0f; + auto var_name = pg_names[param_id]; auto zero_op = framework::OpRegistry::CreateOp( "fill_constant", framework::VariableNameMap{}, - 
{{"Out", {pg_names[param_id]}}}, attrs); + {{"Out", {var_name}}}, attrs); zero_op->Run(scope, dev_place); + scope.FindVar(var_name) + ->GetMutable() + ->set_lod(inside_tensor.lod()); } } @@ -231,7 +235,7 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { auto igs = InputGrad(kX, /*do not drop empty gradient*/ false); for (auto &each_ig : igs) { if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) { - VLOG(10) << "Ignore " << each_ig; + VLOG(8) << "Ignore " << each_ig; each_ig = framework::kEmptyVarName; } } diff --git a/paddle/optimizer/lr_policy.h b/paddle/optimizer/lr_policy.h index bbb1ee48214cecdc6b6cd2a400cc9d12d5e8b64a..9a44a776f2b032bc2c3452a739caf0994e25891b 100644 --- a/paddle/optimizer/lr_policy.h +++ b/paddle/optimizer/lr_policy.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #pragma once #include diff --git a/paddle/optimizer/parameter_optimizer_test.cc b/paddle/optimizer/parameter_optimizer_test.cc index 83757a391784453341f22eca73bc73c14ce4174f..795d2de1d65b3a25c312d0c4e31e0105922838df 100644 --- a/paddle/optimizer/parameter_optimizer_test.cc +++ b/paddle/optimizer/parameter_optimizer_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/optimizer/serialization_test.cc b/paddle/optimizer/serialization_test.cc index 940e941e9042d8a37363311867df5bb477b3dac0..0f1b14eec135ab37a599965965e2e9d8bb65b90c 100644 --- a/paddle/optimizer/serialization_test.cc +++ b/paddle/optimizer/serialization_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
diff --git a/paddle/optimizer/tensor.h b/paddle/optimizer/tensor.h index 86fa625e01b981f0377bd699d191fc865ee89784..e999e9bda129a85306f73afc75f3936fefb4c006 100644 --- a/paddle/optimizer/tensor.h +++ b/paddle/optimizer/tensor.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #pragma once /** * @brief tensor used by optimizer diff --git a/paddle/platform/cpu_info_test.cc b/paddle/platform/cpu_info_test.cc index 8fb195aa7c0a41b7417ff5cf63394046e9c72267..1bfe62c1fb667e17d7383cf0a1b2632043c72743 100644 --- a/paddle/platform/cpu_info_test.cc +++ b/paddle/platform/cpu_info_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "paddle/platform/cpu_info.h" #include "paddle/string/printf.h" diff --git a/paddle/platform/hostdevice.h b/paddle/platform/hostdevice.h index eb2df291cceef553d6422e6166e1fef2c63e2a47..fa4659ed2988a2199d8a8450f825d31fd0ca5907 100644 --- a/paddle/platform/hostdevice.h +++ b/paddle/platform/hostdevice.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #pragma once #ifdef __CUDACC__ diff --git a/paddle/platform/place_test.cc b/paddle/platform/place_test.cc index 4f1eba01df5531529ad3c79648b5e7f8651df619..150b2d3b1fbacec18ea33156f30f1c9965aedb31 100644 --- a/paddle/platform/place_test.cc +++ b/paddle/platform/place_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "paddle/platform/place.h" #include #include "gtest/gtest.h" diff --git a/paddle/pybind/print_operators_doc.cc b/paddle/pybind/print_operators_doc.cc index f4f281229e611a6c9c8e9ecd54e0097ab683bbf3..99694fa592059d979297b72748125d02b2dd70a3 100644 --- a/paddle/pybind/print_operators_doc.cc +++ b/paddle/pybind/print_operators_doc.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include #include // std::stringstream #include diff --git a/paddle/scripts/cluster_train/paddle.py b/paddle/scripts/cluster_train/paddle.py index 9b03ed1d8f6a28259a6cb45f096575b5f3d27ca7..e44bb4505b924b3a955bb5e740aa498f253d7556 100644 --- a/paddle/scripts/cluster_train/paddle.py +++ b/paddle/scripts/cluster_train/paddle.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/python # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # diff --git a/paddle/scripts/cpplint.py b/paddle/scripts/cpplint.py index dff4339ea33b72e22104a56183e3302067dc583d..d0cbb070c432f375430de261fd0eb3233fe26df7 100644 --- a/paddle/scripts/cpplint.py +++ b/paddle/scripts/cpplint.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python # # Copyright (c) 2009 Google Inc. All rights reserved. 
diff --git a/paddle/string/piece.cc b/paddle/string/piece.cc index b80afdec82d642fd3a8245b96ce1bb2bea17cbae..2a553e2832014f4de00c5613d9174edac69167a6 100644 --- a/paddle/string/piece.cc +++ b/paddle/string/piece.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/string/piece.h b/paddle/string/piece.h index 7362ce02c7c80e121218fab77d87696403b1c5e8..fc95263379280221256dd0f696d2cb9999721d86 100644 --- a/paddle/string/piece.h +++ b/paddle/string/piece.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/string/piece_test.cc b/paddle/string/piece_test.cc index cf5152ff5a3cb0a2afae0c90b787abf291122fa3..fb8b9729880a01ee2e597a575d7da15313a471d9 100644 --- a/paddle/string/piece_test.cc +++ b/paddle/string/piece_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/string/printf.h b/paddle/string/printf.h index 8b5ce63a8e8dfe77962ff1e7415911d381a28aac..70d2511531698b03807d751dcec907ba857354f2 100644 --- a/paddle/string/printf.h +++ b/paddle/string/printf.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. diff --git a/paddle/string/printf_test.cc b/paddle/string/printf_test.cc index 2586264046a2e2ba24b0908c1f6eba163cdef448..b5ad35513bdcbccd5dbf951a3a8e5422f6424b26 100644 --- a/paddle/string/printf_test.cc +++ b/paddle/string/printf_test.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "paddle/string/printf.h" #include diff --git a/paddle/string/tinyformat/tinyformat.h b/paddle/string/tinyformat/tinyformat.h index 3516777d9f9669c1e1300b9136c26e61f65b14a7..092c04c3153dfcec4c7c577ea1ac4e8a4a1359a0 100644 --- a/paddle/string/tinyformat/tinyformat.h +++ b/paddle/string/tinyformat/tinyformat.h @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // tinyformat.h // Copyright (C) 2011, Chris Foster [chris42f (at) gmail (d0t) com] // diff --git a/paddle/string/to_string.h b/paddle/string/to_string.h index 3b3bcc69a478045156225728236174fd601461dd..178edc18951f94291a63566516df36956f25f67b 100644 --- a/paddle/string/to_string.h +++ b/paddle/string/to_string.h @@ -15,9 +15,15 @@ limitations under the License. */ #pragma once #include #include +#include namespace paddle { namespace string { +inline std::ostream& operator<<(std::ostream& s, const std::type_index& t) { + s << t.name(); + return s; +} + template inline std::string to_string(T v) { std::ostringstream sout; @@ -25,6 +31,11 @@ inline std::string to_string(T v) { return sout.str(); } +template <> +inline std::string to_string(std::type_index t) { + return t.name(); +} + // Faster std::string/const char* type template <> inline std::string to_string(std::string v) { diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/trainer/tests/simple_sparse_neural_network.py index 30346ef299d0bc8585ccff7f2fc4885b0d9f9dfc..ba554d5872d5a6e94526eb4d09f62a4f2f21ff30 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network.py +++ b/paddle/trainer/tests/simple_sparse_neural_network.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=17, learning_method=AdaGradOptimizer(), learning_rate=1e-4) diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/trainer/tests/simple_sparse_neural_network_dp.py index 86b272edfe1bbb23c45cffe282f6475ceaa0cc41..44e96873f0d38b4349f893b9f3daf53f893869cc 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network_dp.py +++ b/paddle/trainer/tests/simple_sparse_neural_network_dp.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer.PyDataProvider2 import provider, integer_sequence, integer_value import random diff --git a/paddle/utils/enable_virtualenv.py b/paddle/utils/enable_virtualenv.py index ccfaa7c147b2ce25cb6007aa04cfc33961b7e10b..29f8deb32455a49baad0ab5a2b5fab0a62917ec3 100644 --- a/paddle/utils/enable_virtualenv.py +++ b/paddle/utils/enable_virtualenv.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os diff --git a/proto/OptimizerConfig.proto b/proto/OptimizerConfig.proto index d27b1bcf80045216a5807812d39f7a248a956076..b341d78d194ddcbab265084db62752bc53e1b709 100644 --- a/proto/OptimizerConfig.proto +++ b/proto/OptimizerConfig.proto @@ -1,3 +1,16 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
syntax = "proto2"; option optimize_for = LITE_RUNTIME; diff --git a/python/paddle/trainer_config_helpers/evaluators.py b/python/paddle/trainer_config_helpers/evaluators.py index 95797fba8f67bacb421f5c2813ad6332bc53cbc9..0eeaf7eabb179f19d2af8dafe821f7baa153fead 100644 --- a/python/paddle/trainer_config_helpers/evaluators.py +++ b/python/paddle/trainer_config_helpers/evaluators.py @@ -16,13 +16,22 @@ from paddle.trainer.config_parser import * from default_decorators import * __all__ = [ - "evaluator_base", "classification_error_evaluator", "auc_evaluator", - "pnpair_evaluator", "precision_recall_evaluator", "ctc_error_evaluator", - "chunk_evaluator", "sum_evaluator", "column_sum_evaluator", - "value_printer_evaluator", "gradient_printer_evaluator", - "maxid_printer_evaluator", "maxframe_printer_evaluator", - "seqtext_printer_evaluator", "classification_error_printer_evaluator", - "detection_map_evaluator" + "evaluator_base", + "classification_error_evaluator", + "auc_evaluator", + "pnpair_evaluator", + "precision_recall_evaluator", + "ctc_error_evaluator", + "chunk_evaluator", + "sum_evaluator", + "column_sum_evaluator", + "value_printer_evaluator", + "gradient_printer_evaluator", + "maxid_printer_evaluator", + "maxframe_printer_evaluator", + "seqtext_printer_evaluator", + "classification_error_printer_evaluator", + "detection_map_evaluator", ] diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py index 01d31ef3fad827bfd103ee00f4ddd1bde14e0f82..c944a96042a96401b0309e123077041724f60246 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-3, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py index 91849b40a0801b07642f96c061755597cd2ec073..27b11ffdfc7c7f8171c96997cf7fb50fe6b7667d 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(learning_rate=1e-3, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py index f87237f9b59a833825841bcdd605c2332c2d5941..6a900518272d380fa4827161fdf626e5fa2e8fae 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py +++ b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py index 7012dbf6a0b70957d6227d4125f4cd75b9abb215..06115d62e775d843eeca9d11e1def910399fec19 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py +++ b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. ''' Test all activations. ''' diff --git a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py index a607a62c99f69ac4921a465a20f00b6413b31c8e..f5e90fdd89912d103e7404e10e1a3e8762d7c43f 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py +++ b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.py b/python/paddle/trainer_config_helpers/tests/configs/projections.py index dc8975cb311582a621eb4a5a166ddc34348fe3e9..c683d378caad278db0d0bcabdd4c7f2c5c62ace7 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/projections.py +++ b/python/paddle/trainer_config_helpers/tests/configs/projections.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. ''' Test mixed layer, projections and operators. ''' diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py index 7c848ef3fcd63314bfe91db6ebac406ba8758998..bf90d1762c89e96cfdd60ee9729cf33d2c7625c1 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py b/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py index c19bb9685aa24c4d66e4f0bbbcb004507413dbe8..7cfab838552ab01c55bd8a9cc8eb98f79e81cc68 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py index 565e281a6e1deff18aa48f97eb2f0e39ca79752f..8a425c7062ef5e09c1da7d9d1e26481276eafd95 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py index a5b5bb30b1d21aaa0c90868af7b5138e8a81aab1..8ee213a493596c24b7264eb8e2e34f87c2502006 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-4) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py index a991b22252ba10eed895efd931108c2d8b0e52f1..cbd3c3e97f8c31fc0d0eb6778fbbc4a1f09b7358 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-4) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py index cd7f609638e384314177d653e46ecf7a4b41a12f..bed9154fe3ed634bb65e994145c1b06c4ee88aa4 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-4) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py index be83f4f83c5d05ea2ffd9e3df0c09fb1a37a3e57..7e1da753f535e9c7c908f24250a4bfa932b3991d 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py index f066fe1fb30877bf40bb6299d35546f7427989a5..0a719b073540dc3f79b827d1f9fa1a20fc23c39a 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py b/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py index 9b791a0222dd60e9ae2fca8b2798cddd13ed1d1c..7003872700bca22ccf9653172e3bcc6411f90425 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py index aa0a2c0d5fe19b6c414acd708bb6e82d9fb6568f..fb2cacd4433f085241bfdc00c27c8c41f992daa9 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py index 7ce375c708af7b0b7ae1d700dedbdb6a4ce16c7f..a8b5c860ef5abf2c12f8d93c52fba24a9df818e7 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py index caa6aaa9430ffaee7ade93ee04ec90103bf8cf43..eba2e1e4834813be6e5771af0c61c0b54889eb45 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_crop.py b/python/paddle/trainer_config_helpers/tests/configs/test_crop.py index 8314a7e9a5586647c70ff010156817110919c72b..870388faf74bed7e749b944a5bc44304cc098f9e 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_crop.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_crop.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py b/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py index 4a5bdf1181dc4538418a8b89b41a1ff713e423c8..253244dcd42a04b3212f4400ddd1b5320ce3a229 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
#!/usr/bin/env python #coding=utf-8 diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py index a113279fc17b49ad01b8860b61180af0f35694fb..db950093b37a0ec0fa6f4e4fd0dcb5d8270c220b 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py index 3572a2cb07d95ffaec261bdc63492ade734ea8b9..d304a2985916b9a9508706b712b86258b1236a77 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py index e52d48dde0084aacd3f7874cc384d59287a0c7d5..2e5dde2da231908462bad73700c6ac5b8bfc233e 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * vec1 = data_layer(name='vector1', size=10) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py index c53f10e0a410b27d86b2415d98178c4790e0b0ba..345fb2b6aba24b70fcab48a1a9fe466b28cbbfb5 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py index b249de0fee3c8ca4ad0520872fa2497c493d31b5..3a489a39da1f6408dbbef02546371f12e4340b62 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='data', size=1024) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_fc.py b/python/paddle/trainer_config_helpers/tests/configs/test_fc.py index 2842d3429c9c917845f8f4c33d3618608d40291d..90b0e37270f4f00e19dab35d5baa36a9902c6019 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_fc.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_fc.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py index 9dab45519c65b0ca686558ec7fe2064bb9ad8824..2bd4ab2da4e98726a0f35186c3582d4623ca4779 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='input', size=256) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py index 474e4f36bad7eab13251afe265d1a7d107549efd..451909ee183ec4760613b168420433bfc919ebb8 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-4) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py index dff1c535b3e84e14d0e7c343efe911f19872280a..3ebe40aadc7fb89016ee632d3ff19dc9a4b52fce 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py index 171da10f75dae03eed7e110d0efd07d6a18e1ecf..c762467febc5c0fc29437ca07d3793d971f12045 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python #coding=utf-8 from paddle.trainer_config_helpers import * diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py index b36a5c6d1222860ee4b77f89ad4b6148ccd89589..58bf3de104641cb69b31fd64621f72a5df663408 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * outputs( diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py index 7ca1cc2db365dedda5d9673cafaa851a464a7b6b..8d570706dfbf672933b1f6ab78b95f19bc1033f5 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py index eb14270baa0c4ca0b84d2121a80fde0b45eda54a..3b6117d297a2942fda822d5eb71e54a3fc989d06 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py index c3376c47bded5a3aad15331936a61e12ac883b17..083d0643678bbc53429b9722f33427e5e21df2bb 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py index d250001932547d63a70de05940957f90cc014dfb..9c1445584196985759bace73a9b9ba7b91f1870a 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py index b7a15666f0a5b863cbafec5f73dcfe0b9db2e0c7..046698fb4e2a17c738904dd90900d077285ec8e8 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_pad.py b/python/paddle/trainer_config_helpers/tests/configs/test_pad.py index 491e8c8caab38eb7c24e5461107ab5a9d63b12ef..1046db2f098b7eda5f428ad92d740338e791f780 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_pad.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_pad.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py index 0dbb921d41986e711d5b8b31caab1f8b6bdc47b8..37805d43767ba9fb9f7a2e9790332275b0475b1a 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=100, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py index 45b02fbf325bb63b057bbbf64d59af8debf0bc9d..10d759f6d90a70aad50561506f9bf49ae9737441 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300, height=10, width=10) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py index 8da26ff44b19d0c18efae201a3b39002555d2605..22e0ce3e5acf33291ce71c5ecd622ec378e35b85 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py b/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py index 1a693f8dff06dec6e71eeb488da9c807c35e4c9b..d1d97f1c5e1373c677871aa7faf3e29b8c67e68c 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py index 004e2a5dd4efa9feab7619643673b37fe28146c5..6818b91f969c3993cf260e7472942937fe3b9660 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py index 09a6f507338c1da8e9ce60555f8ca2576704170c..ce8a22ebb170428e97905e941f443b43cff9ee42 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py index 91010759e4847f087eb4e05ad98ae794a2129365..79dad5e2508395555bc497c66c7632906fc5cbe0 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py index b739a81b8505c94a2312ac735647fb114982f1f7..264341f899e161a4fea0c4ca2ae736f4154a703f 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='data', size=3 * 14 * 14, height=14, width=14) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py b/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py index ab33c496b0663d8472ce4b272be6c5cecbcfc978..342a5029a834325d3851c2f127d3f520e2a91a58 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py index ac8badb26a40e96e75225e6f61aa536cd28e9098..9521fa6c471a1e6f396c36553cdcd4602b4c5901 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py index dd589116fa9932144ca066d3fa4c929d1433a7f1..698d19d037580ea00911030963b627e4bf560874 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * data = data_layer(name='data', size=100) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py index 8d4bf28bf1eaf58e1fd0eb62fd10efe998587edd..22fb25d0f23d8bf401682812b58a1a1d9165d272 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py b/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py index 5c161ba805fb301e8feb8702ad61a8341df40e3f..1883ed9d4ed428b5a986596cc8e2cda19a400375 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_seq_slice_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_seq_slice_layer.py index 510ad3220893fddac278ba691307d00d57e440a3..12d7f1f33b1ce58abcaa415ce8ac827671a023e1 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_seq_slice_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_seq_slice_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/env python #coding=utf-8 from paddle.trainer_config_helpers import * diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py index 3c205eabd80492a68383fdbecd14a7d6db3e16eb..8cf5fd70e3fd09a66f8b5d78d09c259112a9dec3 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py b/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py index 66629662dd9166766daaf707409b720f56ef1405..7188d82a534e6d37d2d4be7af35433fab19e260b 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py index 318b4459bab7a70ddec534c4ad217161ffc72d5a..a628272196a018f756f3e467fd8fb5db2aa56284 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * define_py_data_sources2( diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py index e0b0d0d3be252700d99f7097f0353df885efcf07..58c1675e6b6ea6288376a44d7ce9541d788187e0 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=100, learning_rate=1e-5) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py index 6d1c3175ba9801d69f3f9cb9e754858253192270..64d1d7b6eebe9cf18df82c2ca800794181b701b4 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
#!/usr/bin/env python #coding=utf-8 from paddle.trainer_config_helpers import * diff --git a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py index ebb39219bdc1fa314e1d70bcda902f71296772f6..6294cb04ef5165eb4fc79dd241b03dda1453cc98 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(batch_size=1000, learning_rate=1e-4) diff --git a/python/paddle/trainer_config_helpers/tests/configs/util_layers.py b/python/paddle/trainer_config_helpers/tests/configs/util_layers.py index 27f1c8e9938cdec12fccb37a3127bba1f8ee8d04..89b881b3611d97f55e3299f761d1dcda946ba076 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/util_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/util_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.trainer_config_helpers import * settings(learning_rate=1e-4, batch_size=1000) diff --git a/python/paddle/utils/image_multiproc.py b/python/paddle/utils/image_multiproc.py index e8db525ff5c388aef1a39d8db56633d509cb4fb9..1acf40df58e7bf5db1a76376037fd06372c9fa8b 100644 --- a/python/paddle/utils/image_multiproc.py +++ b/python/paddle/utils/image_multiproc.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import os, sys import numpy as np from PIL import Image diff --git a/python/paddle/utils/plotcurve.py b/python/paddle/utils/plotcurve.py index 27bd8157d39632913e2fa3278f3af20ddea61da7..27a69b6a5c8b42e6b4c829e3ceebe11bed63ad15 100644 --- a/python/paddle/utils/plotcurve.py +++ b/python/paddle/utils/plotcurve.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. #!/usr/bin/python # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index 191d9ecfb127c1851a392bc9ec83734d630d0ac4..fab8a68b0beee8b813bee2a05047e2da526a9c9b 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -23,7 +23,6 @@ import paddle.v2.dataset import cPickle import glob import cPickle as pickle -import random __all__ = [ 'DATA_HOME', 'download', 'md5file', 'split', 'cluster_files_reader', @@ -206,7 +205,6 @@ def convert(output_path, reader, line_count, name_prefix): indx_f = 0 def write_data(indx_f, lines): - random.shuffle(lines) filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f) writer = recordio.writer(filename) for l in lines: diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index 21ed7f7a5ce279f5bc65e5b008f14a1b0ff97343..37c4296f9bcea7e16daa46f778934331513c30c4 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -25,7 +25,6 @@ import collections import tarfile import re import string -import random __all__ = ['build_dict', 'train', 'test', 'convert'] @@ -83,7 +82,6 @@ def reader_creator(pos_pattern, neg_pattern, word_idx): load(pos_pattern, INS, 0) load(neg_pattern, INS, 1) - random.shuffle(INS) def reader(): for doc, label in INS: diff --git a/python/paddle/v2/dataset/mq2007.py b/python/paddle/v2/dataset/mq2007.py index b705c9109b2b6769c9fafa9241db5d81c682f9e3..d3b3dd524c34be660c5f2d4fc5ce2fa0420efbc1 100644 --- a/python/paddle/v2/dataset/mq2007.py +++ b/python/paddle/v2/dataset/mq2007.py @@ -24,7 +24,6 @@ http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ20 """ import os -import random import functools import rarfile from common import download @@ -265,7 +264,7 @@ def query_filter(querylists): return filter_query -def load_from_text(filepath, shuffle=True, fill_missing=-1): +def load_from_text(filepath, shuffle=False, fill_missing=-1): """ parse data file into querys """ @@ -287,17 +286,14 @@ def load_from_text(filepath, shuffle=True, fill_missing=-1): querylist._add_query(query) if querylist is not None: querylists.append(querylist) - if shuffle == True: - random.shuffle(querylists) return querylists -def __reader__(filepath, format="pairwise", shuffle=True, fill_missing=-1): +def __reader__(filepath, format="pairwise", shuffle=False, fill_missing=-1): """ Parameters -------- filename : string - shuffle : shuffle query-doc pair under the same query fill_missing : fill the missing value. 
default in MQ2007 is -1 Returns diff --git a/python/paddle/v2/dataset/sentiment.py b/python/paddle/v2/dataset/sentiment.py index b0b9757c1a75d215cf8945b5cedbb1239fd43af7..7174413018cc29216a99c9291dee5ef723ea95f1 100644 --- a/python/paddle/v2/dataset/sentiment.py +++ b/python/paddle/v2/dataset/sentiment.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. # /usr/bin/env python # -*- coding:utf-8 -*- diff --git a/python/paddle/v2/dataset/tests/imikolov_test.py b/python/paddle/v2/dataset/tests/imikolov_test.py index 4e52810e6b924e0796e3d836dbbcb27ede2c9e25..9b3ab72feb0d30cbd5c86d3add31ee46ccd4d653 100644 --- a/python/paddle/v2/dataset/tests/imikolov_test.py +++ b/python/paddle/v2/dataset/tests/imikolov_test.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.dataset.imikolov import unittest diff --git a/python/paddle/v2/dataset/tests/test_sentiment.py b/python/paddle/v2/dataset/tests/test_sentiment.py index 407405290734609059c1767600748d530e8a13a6..f107948801d09f2e4490c8187217322bd57d5edb 100644 --- a/python/paddle/v2/dataset/tests/test_sentiment.py +++ b/python/paddle/v2/dataset/tests/test_sentiment.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. # /usr/bin/env python # -*- coding:utf-8 -*- diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index a0ffd31c545eb10dd8c2f14746ee90df58700e61..f322bffe133e9a726668495826eb145062f52ac7 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ Testing and training events. diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index ec5159fca161ed1912bc4145e732b7927833cc0b..8c29ee741cbb8f484531c450dd99cf183f78178e 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function # import all class inside framework into fluid module import framework @@ -18,6 +31,7 @@ from param_attr import ParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace from distribute_transpiler import DistributeTranspiler +from distribute_transpiler_simple import SimpleDistributeTranspiler import clip from memory_optimization_transpiler import memory_optimize @@ -37,6 +51,7 @@ __all__ = framework.__all__ + executor.__all__ + [ 'ParamAttr' 'DataFeeder', 'clip', + 'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize', ] diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 43f6133a6534efb676dacea2e8b8d25846d91247..27cf637c48bbd5fb8578d1526530b484f3a2c523 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.v2.fluid import framework as framework from . import core import collections diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py index 776c0f3f0276cd228db9846e473c65d44e10bbb7..e4d9ed599ee297b2f3cf85f3eed716b1c49578d3 100644 --- a/python/paddle/v2/fluid/clip.py +++ b/python/paddle/v2/fluid/clip.py @@ -1,9 +1,23 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import functools import layers from . import core __all__ = [ 'GradientClipByValue', + 'ErrorClipByValue', 'append_gradient_clip_ops', 'error_clip_callback', ] @@ -25,12 +39,12 @@ class ErrorClipByValue(BaseErrorClipAttr): self.min = min def append_clip_op(self, block, grad_name): - block.append_op( - type="clip", - inputs={"X": grad_name}, - outputs={"Out": grad_name}, - attrs={"min": self.min, - "max": self.max}) + clip_op_desc = block.desc.append_op() + clip_op_desc.set_type("clip") + clip_op_desc.set_input("X", [grad_name]) + clip_op_desc.set_output("Out", [grad_name]) + clip_op_desc.set_attr("min", self.min) + clip_op_desc.set_attr("max", self.max) def error_clip_callback(block, context): @@ -41,6 +55,11 @@ def error_clip_callback(block, context): op_desc.output_arg_names()): fwd_var = block.var_recursive(grad_to_var[grad_n]) error_clip = getattr(fwd_var, "error_clip", None) + if not (error_clip is None or isinstance(error_clip, + BaseErrorClipAttr)): + raise TypeError( + "Variable's error_clip should be an instance of BaseErrorClipAttr or None." + ) if error_clip is not None: error_clip.append_clip_op(block, grad_n) diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/v2/fluid/data_feeder.py index 24036c3e75b9594ba58cccb02825ab8020d1e107..bfdd00e3ef77f5977926d452a648df5259f58823 100644 --- a/python/paddle/v2/fluid/data_feeder.py +++ b/python/paddle/v2/fluid/data_feeder.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import core import numpy diff --git a/python/paddle/v2/fluid/default_scope_funcs.py b/python/paddle/v2/fluid/default_scope_funcs.py index 9aebc07f8e8aac2d6bfbe7a7817b4bd261859415..2218bb140ac57bd3d7fa9cd0b97a9bcea48aa201 100644 --- a/python/paddle/v2/fluid/default_scope_funcs.py +++ b/python/paddle/v2/fluid/default_scope_funcs.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ Default scope function. 
diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 49ece7b725e318d7526d58fe54c97cbe20200a7d..06a7b6fb02f5e38e0762d112492854f027fe66ad 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -1,51 +1,75 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +from __future__ import print_function import framework from framework import Program, default_main_program, Parameter, Variable import optimizer from layer_helper import LayerHelper +from distributed_spliter import * +import math -def hash_name_to_server(params_grads, pserver_endpoints): - """ - :param param_grads: - :return: a map of pserver endpoint -> - params -> [param list] - grads -> [grad list] - """ - - def _hash_param(param_name, total): - return hash(param_name) % total - - param_grad_map = dict() - for param, grad in params_grads: - if param.trainable is True and grad is not None: - server_id = _hash_param(param.name, len(pserver_endpoints)) - server_for_param = pserver_endpoints[server_id] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) - - return param_grad_map +class VarBlock: + def __init__(self, varname, offset, size): + self.varname = varname + # NOTE: real offset is offset * size + self.offset = offset + self.size = size + def __str__(self): + return "%s:%d:%d" % (self.varname, self.offset, self.size) -def round_robin(params_grads, pserver_endpoints): - assert (len(params_grads) > len(pserver_endpoints)) - param_grad_map = dict() - pserver_idx = 0 - for param, grad in params_grads: - if param.trainable is True: - server_for_param = pserver_endpoints[pserver_idx] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) +def split_dense_variable(var_list, + pserver_count, + min_block_size=1024, + max_block_size=1048576): + """ + We may need to split dense tensor to one or several blocks and put + them equally onto parameter server. One block is a sub-tensor + aligned by dim[0] of the tensor. + + We need to have a minimal block size so that the calculations in + the parameter server side can gain better performance. By default + mininum block size is 1024. The max block size is used to prevent + too large block that may causing send error. 
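# [editorial sketch, not part of this patch] A worked example of the block
# arithmetic described above, assuming a hypothetical (1000, 784) dense
# parameter "w" spread over 4 parameter servers:
#   var_numel         = 1000 * 784 = 784000
#   max_pserver_count = floor(784000 / 1024) = 765, which exceeds 4, so the
#                       variable is still split across all 4 pservers
#   block_size        = ceil(784000 / 4) = 196000, already a multiple of
#                       dim1 = 784, so no re-alignment is needed
#   result            = 4 VarBlocks of 250 rows each, serialized by __str__
#                       as "w:0:196000", "w:1:196000", "w:2:196000", "w:3:196000"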
+ """ + blocks = [] + for var in var_list: + split_count = pserver_count + var_numel = reduce(lambda x, y: x * y, var.shape) + max_pserver_count = int(math.floor(var_numel / float(min_block_size))) + if max_pserver_count == 0: + max_pserver_count = 1 + if max_pserver_count < pserver_count: + split_count = max_pserver_count + block_size = int(math.ceil(var_numel / float(split_count))) - pserver_idx += 1 - if pserver_idx >= len(pserver_endpoints): - pserver_idx = 0 - return param_grad_map + if len(var.shape) >= 2: + # align by dim1(width) + dim1 = reduce(lambda x, y: x * y, var.shape[1:]) + remains = block_size % dim1 + if remains != 0: + block_size += dim1 - remains + # update split_count after align + split_count = int(math.ceil(var_numel / float(block_size))) + for block_id in xrange(split_count): + curr_block_size = min(block_size, var_numel - ( + (block_id) * block_size)) + block = VarBlock(var.name, block_id, curr_block_size) + blocks.append(str(block)) + return blocks class DistributeTranspiler: @@ -58,7 +82,6 @@ class DistributeTranspiler: split_method=round_robin): """ Transpile the program to a distributed data-parallelism programs. - The main_program will be transform to use a remote parameter server to do parameter optimization. And the optimization graph will be put in to a parameter server program. @@ -66,60 +89,113 @@ class DistributeTranspiler: Use different methods to split trainable varialbles to different parameter servers. - Example to run: - - exe = fluid.Executor(place) - t = fluid.DistributeTranspiler() - t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) - - pserver_endpoint = os.getenv("PSERVER") - if pserver_endpoint: - pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) - exe.run(fluid.default_startup_program()) - exe.run(pserver_prog) - else: - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - ... - :param optimize_ops: op list of optimization, should be the return value of Optimizer.minimize :type optimize_ops: list :param program: program to optimize, default default_main_program :param pservers: parameter server endpoints like "m1:6174,m2:6174" :type pservers: string - :return: return a list of programs """ + assert (callable(split_method)) if program is None: program = default_main_program() self.program = program self.trainers = trainers self.optimize_ops = optimize_ops - self._optimize_distributed( - optimize_ops, - program, - params_grads, - pservers=pservers, - trainers=trainers, - split_method=split_method) - - def _clone_param(self, block, v): - assert isinstance(v, Parameter) - new_p = Parameter( - block=block, - shape=v.shape, - dtype=v.dtype, - type=v.type, - lod_level=v.lod_level, - stop_gradient=v.stop_gradient, - trainable=v.trainable, - optimize_attr=v.optimize_attr, - regularizer=v.regularizer, - name=v.name) - block.vars[new_p.name] = new_p + # steps to transpile: + # 1. split variable to multiple blocks, align by product(dim[1:]) (width). + # 2. modify trainer program add split_op to each Grad. + # 3. append send_op to trainer. + # 4. append concat_op to trainer to update local weights. + # 5. create new program as parameter server. + # 6. 
create parameter server program by split_method generated endpoint->VarBlock + + pserver_endpoints = pservers.split(",") + + # step1 + param_list = [pg[0] for pg in params_grads] + grad_list = [pg[1] for pg in params_grads] + # TODO: add split selected rows support + grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) + param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) + # step2 + grad_var_mapping = self._append_split_op(program, grad_blocks) + + # step3 + send_inputs = [] + send_outputs = [] + for b in grad_blocks: # append by order + varname, block_id, _ = b.split(":") + send_inputs.append(grad_var_mapping[varname][int(block_id)]) + + param_var_mapping = self._create_vars_from_blocklist(program, + param_blocks) + for b in param_blocks: + varname, block_id, _ = b.split(":") + send_outputs.append(param_var_mapping[varname][int(block_id)]) + # let send_op know which endpoint to send which var, eplist is of the same + # order of send_inputs. + eplist = split_method(send_inputs, pserver_endpoints) + # create mapping of endpoint -> splited var to create pserver side program + self.param_grad_ep_mapping = dict() + for i, ep in enumerate(eplist): + param = send_outputs[i] + grad = send_inputs[i] + if not self.param_grad_ep_mapping.has_key(ep): + self.param_grad_ep_mapping[ep] = {"params": [], "grads": []} + self.param_grad_ep_mapping[ep]["params"].append(param) + self.param_grad_ep_mapping[ep]["grads"].append(grad) + + send_op = program.global_block().append_op( + type="send", + inputs={"X": send_inputs}, + outputs={"Out": send_outputs}, + attrs={"endpoints": pserver_endpoints, + "epmap": eplist}) + # step4 + for varname, splited_var in param_var_mapping.iteritems(): + if len(splited_var) <= 1: + continue + orig_param = program.global_block().vars[varname] + concat = program.global_block().append_op( + type="concat", + inputs={"X": splited_var}, + outputs={"Out": [orig_param]}, + attrs={"axis": 0}) + + def _create_vars_from_blocklist(self, program, block_list): + block_map = dict() + var_mapping = dict() + for block_str in block_list: + varname, offset, size = block_str.split(":") + if not block_map.has_key(varname): + block_map[varname] = [] + block_map[varname].append((long(offset), long(size))) + for varname, splited in block_map.iteritems(): + orig_var = program.global_block().vars[varname] + var_mapping[varname] = [] + if len(splited) == 1: + var_mapping[varname] = [orig_var] + continue + orig_shape = orig_var.shape + orig_dim1_flatten = 1 + if len(orig_shape) >= 2: + orig_dim1_flatten = reduce(lambda x, y: x * y, orig_shape[1:]) + + for i, block in enumerate(splited): + size = block[1] + rows = size / orig_dim1_flatten + splited_shape = [rows] + if len(orig_shape) >= 2: + splited_shape.extend(orig_shape[1:]) + var = program.global_block().create_var( + name="%s.block%d" % (varname, i), + psersistable=False, + dtype=orig_var.dtype, + shape=splited_shape) # flattend splited var + var_mapping[varname].append(var) + return var_mapping def _clone_var(self, block, var): assert isinstance(var, Variable) @@ -129,34 +205,27 @@ class DistributeTranspiler: dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=var.persistable) + # HACK: let all param in pserver persistable so child + # program in recv can get them + persistable=True) - def _optimize_distributed(self, optimize_ops, program, params_and_grads, - **kwargs): - if kwargs.has_key("split_method"): - split_method = kwargs["split_method"] - else: - split_method = round_robin - - assert 
(callable(split_method)) - pserver_endpoints = kwargs["pservers"].split(",") - self.param_grad_map = split_method(params_and_grads, pserver_endpoints) - - send_op_ordered_inputs = [] - send_op_ordered_outputs = [] - epmap = [] - for ep, v in self.param_grad_map.iteritems(): - send_op_ordered_inputs.extend(v["grads"]) - send_op_ordered_outputs.extend(v["params"]) - for i in v["grads"]: - epmap.append(ep) - send_op = program.global_block().append_op( - type="send", - inputs={"X": send_op_ordered_inputs - }, # inputs is a list of tensors to be send - outputs={"Out": send_op_ordered_outputs}, - attrs={"endpoints": pserver_endpoints, - "epmap": epmap}) + def _append_split_op(self, program, gradblocks): + var_mapping = self._create_vars_from_blocklist(program, gradblocks) + for varname, splited_vars in var_mapping.iteritems(): + # variable that don't need to split have empty splited_vars + if len(splited_vars) <= 1: + continue + orig_var = program.global_block().vars[varname] + sections = [] + for v in splited_vars: + sections.append(v.shape[0]) + program.global_block().append_op( + type="split", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={"sections": sections} # assume split evenly + ) + return var_mapping def get_trainer_program(self): # remove optimize ops and add a send op to main_program @@ -174,69 +243,267 @@ class DistributeTranspiler: var_list.append(var_each) return var_list - def get_pserver_program(self, endpoint, optimize_ops): - pserver_program = Program() - for v in self.param_grad_map[endpoint]["params"]: - self._clone_param(pserver_program.global_block(), v) + def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, + param_shape): + """ + Returns the shape for optimizer inputs that need to be reshaped when + Param and Grad is splited to multiple servers. + """ + # HACK(typhoonzero): Should use functions of corresponding optimizer in + # optimizer.py to get the shape, do not bind this in the transpiler. + if op_type == "adam": + if varkey in ["Moment1", "Moment2"]: + return param_shape + elif op_type == "adagrad": + if varkey == "Moment": + return param_shape + elif op_type == "adamax": + if varkey in ["Moment", "InfNorm"]: + return param_shape + elif op_type == "momentum": + if varkey == "Velocity": + return param_shape + elif op_type == "": + if varkey == "Moment": + return param_shape + elif op_type == "sgd": + pass + return orig_shape - optimize_sub_program = Program() - grad_var_names = [ - var.name for var in self.param_grad_map[endpoint]["grads"] + def _is_op_on_pserver(self, endpoint, all_ops, idx): + """ + Recursively check if the op need to run on current server. + Assume that ops are in the execution order. + """ + param_names = [ + p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] - for opt_op in optimize_ops: - for _, var in opt_op.inputs.iteritems(): - # NOTE: append operators to merge gradients from multiple - # trainers. If trainers == 1, this is not needed. 
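# [editorial sketch, not part of this patch] In plain terms, "merge gradients"
# on the parameter server means averaging the per-trainer copies of a gradient
# block before the optimizer op consumes it; with hypothetical names:
#   merged_grad = (grad_trainer_0 + ... + grad_trainer_N-1) * (1.0 / N)
# which the surrounding code expresses as a "sum" op followed by a "scale" op
# with scale = 1.0 / trainers; with a single trainer both ops are skipped.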
- if self.trainers > 1 and var.name in grad_var_names: + op = all_ops[idx] + if op.inputs.has_key("Param"): + if op.inputs["Param"].name in param_names: + return True + else: + for n in param_names: + if n.startswith(op.inputs["Param"].name+".block") and \ + n != op.inputs["Param"].name: + return True + return False + else: + j = idx - 1 + while j >= 0: + prev_op = all_ops[j] + prev_output_names = [o.name for o in prev_op.outputs.values()] + prev_input_names = [o.name for o in prev_op.inputs.values()] + found1 = False + found2 = False + for _, v in op.inputs.iteritems(): + if v.name in prev_output_names: + found1 = self._is_op_on_pserver(endpoint, all_ops, j) + # later ops may produce output for prev op's next batch use. + for _, v in op.outputs.iteritems(): + if v.name in prev_input_names: + found2 = self._is_op_on_pserver(endpoint, all_ops, j) + if found1 or found2: + return True + j -= 1 + return False + + def _append_pserver_ops(self, program, pserver_program, opt_op, endpoint): + new_inputs = dict() + # update param/grad shape first, then other inputs like + # moment can use the updated shape + for key, var in opt_op.inputs.iteritems(): + if key == "Grad": + grad_block = None + for g in self.param_grad_ep_mapping[endpoint]["grads"]: + if g.name.startswith(var.name): + grad_block = g + break + if not grad_block: + # do not append this op if current endpoint + # is not dealing with this grad block + return + merged_var = program.global_block().create_var( + name=grad_block.name, + persistable=grad_block.persistable, + dtype=grad_block.dtype, + shape=grad_block.shape) + # append merging ops if trainers > 1 + if self.trainers > 1: vars2merge = self._create_var_for_trainers( - optimize_sub_program.global_block(), var, self.trainers) - merged_var = optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - optimize_sub_program.global_block().append_op( + program.global_block(), grad_block, self.trainers) + program.global_block().append_op( type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) - optimize_sub_program.global_block().append_op( + program.global_block().append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainers)}) - else: - optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + new_inputs[key] = merged_var + elif key == "Param": + # param is already created on global program + param_block = None + for p in self.param_grad_ep_mapping[endpoint]["params"]: + if p.name.startswith(var.name): + param_block = p + break + if not param_block: + return + tmpvar = program.global_block().create_var( + name=param_block.name, + persistable=True, + dtype=param_block.dtype, + shape=param_block.shape) + + new_inputs[key] = tmpvar + for key, var in opt_op.inputs.iteritems(): + if key in ["Param", "Grad"]: + continue + # update accumulator variable shape + param_shape = new_inputs["Param"].shape + new_shape = self._get_optimizer_input_shape(opt_op.type, key, + var.shape, param_shape) + tmpvar = program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape) + new_inputs[key] = tmpvar + # create var in pserver program global block. + # TODO(typhoonzero): put blocks in one program to avoid create two + # variables. 
+ pserver_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape) + + # change outputs ParamOut variable + opt_op.outputs["ParamOut"] = new_inputs["Param"] + program.global_block().append_op( + type=opt_op.type, + inputs=new_inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + + def _append_pserver_non_opt_ops(self, program, pserver_program, opt_op): + for _, var in opt_op.inputs.iteritems(): + program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + pserver_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + + def get_pserver_program(self, endpoint, optimize_ops): + """ + get pserver side program by endpoint + + NOTE: assume blocks of the same variable is not distributed + on the same pserver, only change param/grad varnames for + trainers to fetch. For each pserver endpoint, server side + program must be a sub-set of the original optimization program. + """ + # step5 + pserver_program = Program() + for v in self.param_grad_ep_mapping[endpoint]["params"]: + self._clone_var(pserver_program.global_block(), v) + # step6 + optimize_sub_program = Program() + for idx, opt_op in enumerate(optimize_ops): + is_op_on_pserver = self._is_op_on_pserver(endpoint, optimize_ops, + idx) + if not is_op_on_pserver: + continue if opt_op.inputs.has_key("Grad"): - if opt_op.inputs["Grad"].name in grad_var_names: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) + self._append_pserver_ops(optimize_sub_program, pserver_program, + opt_op, endpoint) else: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) + self._append_pserver_non_opt_ops(optimize_sub_program, + pserver_program, opt_op) pserver_program.global_block().append_op( type="recv", - inputs={"RX": - self.param_grad_map[endpoint]["grads"]}, # grads to recv + inputs={"RX": self.param_grad_ep_mapping[endpoint]["grads"] + }, # grads to recv outputs={}, attrs={ "OptimizeProgram": optimize_sub_program.desc, "endpoint": endpoint, - "ParamList": - [p.name for p in self.param_grad_map[endpoint]["params"]], - "GradList": - [p.name for p in self.param_grad_map[endpoint]["grads"]], + "ParamList": [ + p.name + for p in self.param_grad_ep_mapping[endpoint]["params"] + ], + "GradList": [ + p.name + for p in self.param_grad_ep_mapping[endpoint]["grads"] + ], "Trainers": self.trainers }) pserver_program.sync_with_cpp() return pserver_program + + def get_startup_program(self, endpoint, pserver_program): + """ + Get startup program for current parameter server. + Modify operator input variables if there are variables that + was splited to several blocks. + """ + s_prog = Program() + orig_s_prog = framework.default_startup_program() + params = self.param_grad_ep_mapping[endpoint]["params"] + + def _get_splited_name_and_shape(varname): + for idx, splited_param in enumerate(params): + pname = splited_param.name + if pname.startswith(varname) and varname != pname: + return pname, splited_param.shape + return "", [] + + # 1. 
create vars in pserver program to startup program + pserver_vars = pserver_program.global_block().vars + created_var_map = dict() + for _, var in pserver_vars.iteritems(): + tmpvar = s_prog.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + created_var_map[var.name] = tmpvar + + # 2. rename op outputs + for op in orig_s_prog.global_block().ops: + new_outputs = dict() + # do not append startup op if var is not on this pserver + op_on_pserver = False + for key, var in op.outputs.iteritems(): + newname, _ = _get_splited_name_and_shape(var.name) + if newname: + op_on_pserver = True + new_outputs[key] = created_var_map[newname] + elif var.name in pserver_vars: + op_on_pserver = True + new_outputs[key] = pserver_vars[var.name] + + if op_on_pserver: + if op.type in [ + "gaussian_random", "fill_constant", "uniform_random" + ]: + op.attrs["shape"] = new_outputs["Out"].shape + s_prog.global_block().append_op( + type=op.type, + inputs=op.inputs, + outputs=new_outputs, + attrs=op.attrs) + return s_prog diff --git a/python/paddle/v2/fluid/distribute_transpiler_simple.py b/python/paddle/v2/fluid/distribute_transpiler_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..bd88f02bde0c6a58138e20db2b07cbd06cd40ba3 --- /dev/null +++ b/python/paddle/v2/fluid/distribute_transpiler_simple.py @@ -0,0 +1,255 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
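A minimal sketch of the block-splitting and placement scheme implemented above, under simplifying assumptions: demo_split_blocks and demo_round_robin are illustrative stand-ins (not the transpiler's split_dense_variable/round_robin), block sizes here count rows rather than flattened elements, and the middle field of each "name:id:size" string is treated purely as a block index.

# Illustrative only: mimics how parameters/gradients are cut into
# "varname:block_id:size" strings, how per-block variables get the
# "<varname>.block<i>" names, and how blocks are assigned to pserver
# endpoints in round-robin order.
def demo_split_blocks(var_name, num_rows, num_pservers):
    rows_per_block = (num_rows + num_pservers - 1) // num_pservers
    blocks, offset, block_id = [], 0, 0
    while offset < num_rows:
        size = min(rows_per_block, num_rows - offset)
        blocks.append("%s:%d:%d" % (var_name, block_id, size))
        offset += size
        block_id += 1
    return blocks


def demo_round_robin(blocks, pserver_endpoints):
    # same ordering contract as distributed_spliter.round_robin: the i-th
    # returned endpoint serves the i-th block of the input list.
    return [pserver_endpoints[i % len(pserver_endpoints)]
            for i in range(len(blocks))]


if __name__ == "__main__":
    eps = ["127.0.0.1:6174", "127.0.0.1:6175"]
    blocks = demo_split_blocks("fc_w", num_rows=10, num_pservers=len(eps))
    block_vars = ["%s.block%d" % (b.split(":")[0], i)
                  for i, b in enumerate(blocks)]
    print(blocks)      # ['fc_w:0:5', 'fc_w:1:5']
    print(block_vars)  # ['fc_w.block0', 'fc_w.block1']
    print(demo_round_robin(blocks, eps))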
+import framework +from framework import Program, default_main_program, Parameter, Variable +import optimizer +from layer_helper import LayerHelper + + +def hash_name_to_server(params_grads, pserver_endpoints): + """ + :param param_grads: + :return: a map of pserver endpoint -> + params -> [param list] + grads -> [grad list] + """ + + def _hash_param(param_name, total): + return hash(param_name) % total + + param_grad_map = dict() + for param, grad in params_grads: + if param.trainable is True and grad is not None: + server_id = _hash_param(param.name, len(pserver_endpoints)) + server_for_param = pserver_endpoints[server_id] + if not param_grad_map.has_key(server_for_param): + param_grad_map[server_for_param] = {"params": [], "grads": []} + param_grad_map[server_for_param]["params"].append(param) + param_grad_map[server_for_param]["grads"].append(grad) + + return param_grad_map + + +def round_robin(params_grads, pserver_endpoints): + assert (len(params_grads) > len(pserver_endpoints)) + + param_grad_map = dict() + pserver_idx = 0 + for param, grad in params_grads: + if param.trainable is True: + server_for_param = pserver_endpoints[pserver_idx] + if not param_grad_map.has_key(server_for_param): + param_grad_map[server_for_param] = {"params": [], "grads": []} + + param_grad_map[server_for_param]["params"].append(param) + param_grad_map[server_for_param]["grads"].append(grad) + + pserver_idx += 1 + if pserver_idx >= len(pserver_endpoints): + pserver_idx = 0 + return param_grad_map + + +class SimpleDistributeTranspiler: + def transpile(self, + optimize_ops, + params_grads, + program=None, + pservers="127.0.0.1:6174", + trainers=1, + split_method=round_robin): + """ + Transpile the program to a distributed data-parallelism programs. + + The main_program will be transform to use a remote parameter server + to do parameter optimization. And the optimization graph will be put + in to a parameter server program. + + Use different methods to split trainable varialbles to different + parameter servers. + + Example to run: + + exe = fluid.Executor(place) + t = fluid.DistributeTranspiler() + t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) + + pserver_endpoint = os.getenv("PSERVER") + if pserver_endpoint: + pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) + exe.run(fluid.default_startup_program()) + exe.run(pserver_prog) + else: + feeder = fluid.DataFeeder(feed_list=[images, label], place=place) + exe.run(fluid.default_startup_program()) + + for pass_id in range(PASS_NUM): + ... 
+ + :param optimize_ops: op list of optimization, should be the + return value of Optimizer.minimize + :type optimize_ops: list + :param program: program to optimize, default default_main_program + :param pservers: parameter server endpoints like "m1:6174,m2:6174" + :type pservers: string + + :return: return a list of programs + """ + if program is None: + program = default_main_program() + self.program = program + self.trainers = trainers + self.optimize_ops = optimize_ops + self._optimize_distributed( + optimize_ops, + program, + params_grads, + pservers=pservers, + trainers=trainers, + split_method=split_method) + + def _clone_param(self, block, v): + assert isinstance(v, Parameter) + new_p = Parameter( + block=block, + shape=v.shape, + dtype=v.dtype, + type=v.type, + lod_level=v.lod_level, + stop_gradient=v.stop_gradient, + trainable=v.trainable, + optimize_attr=v.optimize_attr, + regularizer=v.regularizer, + name=v.name) + block.vars[new_p.name] = new_p + + def _clone_var(self, block, var): + assert isinstance(var, Variable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=var.persistable) + + def _optimize_distributed(self, optimize_ops, program, params_and_grads, + **kwargs): + if kwargs.has_key("split_method"): + split_method = kwargs["split_method"] + else: + split_method = round_robin + + assert (callable(split_method)) + pserver_endpoints = kwargs["pservers"].split(",") + self.param_grad_map = split_method(params_and_grads, pserver_endpoints) + + send_op_ordered_inputs = [] + send_op_ordered_outputs = [] + epmap = [] + for ep, v in self.param_grad_map.iteritems(): + send_op_ordered_inputs.extend(v["grads"]) + send_op_ordered_outputs.extend(v["params"]) + for i in v["grads"]: + epmap.append(ep) + send_op = program.global_block().append_op( + type="send", + inputs={"X": send_op_ordered_inputs + }, # inputs is a list of tensors to be send + outputs={"Out": send_op_ordered_outputs}, + attrs={"endpoints": pserver_endpoints, + "epmap": epmap}) + + def get_trainer_program(self): + # remove optimize ops and add a send op to main_program + self.program.global_block().delete_ops(self.optimize_ops) + return self.program + + def _create_var_for_trainers(self, block, var, trainers): + var_list = [] + for i in xrange(trainers): + var_each = block.create_var( + name="%s.trainer_%d" % (var.name, i), + psersistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + var_list.append(var_each) + return var_list + + def get_pserver_program(self, endpoint, optimize_ops): + pserver_program = Program() + for v in self.param_grad_map[endpoint]["params"]: + self._clone_param(pserver_program.global_block(), v) + + optimize_sub_program = Program() + grad_var_names = [ + var.name for var in self.param_grad_map[endpoint]["grads"] + ] + for opt_op in optimize_ops: + for _, var in opt_op.inputs.iteritems(): + # NOTE: append operators to merge gradients from multiple + # trainers. If trainers == 1, this is not needed. 
+ if self.trainers > 1 and var.name in grad_var_names: + vars2merge = self._create_var_for_trainers( + optimize_sub_program.global_block(), var, self.trainers) + merged_var = optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + optimize_sub_program.global_block().append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}) + optimize_sub_program.global_block().append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(self.trainers)}) + else: + optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + + if opt_op.inputs.has_key("Grad"): + if opt_op.inputs["Grad"].name in grad_var_names: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + else: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + pserver_program.global_block().append_op( + type="recv", + inputs={"RX": + self.param_grad_map[endpoint]["grads"]}, # grads to recv + outputs={}, + attrs={ + "OptimizeProgram": optimize_sub_program.desc, + "endpoint": endpoint, + "ParamList": + [p.name for p in self.param_grad_map[endpoint]["params"]], + "GradList": + [p.name for p in self.param_grad_map[endpoint]["grads"]], + "Trainers": self.trainers + }) + pserver_program.sync_with_cpp() + return pserver_program diff --git a/python/paddle/v2/fluid/distributed_spliter.py b/python/paddle/v2/fluid/distributed_spliter.py new file mode 100644 index 0000000000000000000000000000000000000000..e647f760e9d3d400e28f54215b684079b2279ffc --- /dev/null +++ b/python/paddle/v2/fluid/distributed_spliter.py @@ -0,0 +1,48 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +def hash_name(varlist, pserver_endpoints): + """ + hash variable names to several endpoints. + + :param varlist: a list of Variables + :return: a map of pserver endpoint -> varname + """ + + def _hash_block(block_str, total): + return hash(block_str) % total + + eplist = [] + for var in varlist: + server_id = _hash_block(var.name(), len(pserver_endpoints)) + server_for_param = pserver_endpoints[server_id] + eplist.append(server_for_param) + return eplist + + +def round_robin(varlist, pserver_endpoints): + """ + distribute variables to several endpoints. 
+ """ + assert (len(varlist) > len(pserver_endpoints)) + + eplist = [] + pserver_idx = 0 + for var in varlist: + server_for_param = pserver_endpoints[pserver_idx] + eplist.append(server_for_param) + + pserver_idx += 1 + if pserver_idx >= len(pserver_endpoints): + pserver_idx = 0 + return eplist diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index dc083f37b5f357e835fc1a45c25a420b2c3d9798..adf174a07daeea521fa3a1c97273ec68b3a9a67f 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import layers diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 1b2075dcd5ece5706e62431b360d4dc86ea57a89..a99c5157b285d7edbf06398c00df3e7ec514cd91 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import contextlib from framework import Program, default_main_program diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index bdbfe9da0772fdbd00dfc8ed00413ece56f48407..4f8366b64039b2edcd4c273439c87397bdc33595 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import collections import contextlib @@ -103,8 +116,8 @@ def _debug_string_(proto, throw_on_error=True): """ error_fields = list() if not proto.IsInitialized(error_fields) and throw_on_error: - raise ValueError("{0} are not initialized\nThe message is {1}".format( - error_fields, proto)) + raise ValueError("{0} are not initialized.\nThe message is {1}:\n". 
+ format(error_fields, proto)) return proto.__str__() @@ -280,6 +293,9 @@ class Variable(object): uid = core.unique_integer(prefix) # unique during whole process. return "_".join([prefix, str(uid)]) + def set_error_clip(self, error_clip): + self.error_clip = error_clip + def get_all_op_protos(): """ @@ -358,12 +374,13 @@ class Operator(object): >>> outputs={"Out": [var1]}) Args: - block(Block): The block has the current operator - desc(core.OpDesc): The protobuf description + block(Block): The block has the current operator. + desc(core.OpDesc): The protobuf description. type(str): The type of operator. inputs(dict): The input dictionary. Key is the input parameter name. Value is a list of variables. - outputs(dict): The output dictionary. Has same format with inputs + outputs(dict): The output dictionary which has the same format with + inputs. attrs(dict): The attributes dictionary. Key is attribute name. Value is the attribute value. The attribute type should be as same as the type registered in C++ @@ -420,10 +437,11 @@ class Operator(object): for m in proto.outputs: need.add(m.name) if not given == need: - raise ValueError( - "Incorrect setting for output(s) of operator \"%s\". Need: [%s] Given: [%s]" - % (type, ", ".join(str(e) for e in need), ", ".join( - str(e) for e in given))) + raise ValueError(("Incorrect setting for output(s) of " + "operator \"%s\". Need: [%s] Given: [%s]") % + (type, ", ".join(str(e) + for e in need), ", ".join( + str(e) for e in given))) for out_proto in proto.outputs: out_args = outputs[out_proto.name] @@ -802,9 +820,8 @@ class Program(object): if isinstance(t, Variable): t = t.op else: - raise ValueError( - "All targets of prune() can only be Variable or Operator." - ) + raise ValueError(("All targets of prune() can only be " + "Variable or Operator.")) targets_idx.append([t.block.idx, t.idx]) res = Program() diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/v2/fluid/initializer.py index c3ed1a9089603abe86d815f6826d084d23e01d99..2e8cfa3177ba00b48a1297b1deec7350588745f2 100644 --- a/python/paddle/v2/fluid/initializer.py +++ b/python/paddle/v2/fluid/initializer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import framework import numpy as np diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index 54b6978ebaa02e1a070a666f60cd61b66d3ac1f8..499df05e592855f63f41ec8ceb939edf0e4d435c 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import os import cPickle as pickle diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py index 325735e67936ed40ae83a11ce2e45e2f618d3ac6..191d2349b5e692f1f8ad9c068daf5592031433ad 100644 --- a/python/paddle/v2/fluid/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import copy import itertools diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py index 50ac0aba01a4079e7caf49d552c9361977aaa65d..c190af3329409e5b87b182a11a84dc87dfc46d6e 100644 --- a/python/paddle/v2/fluid/layers/__init__.py +++ b/python/paddle/v2/fluid/layers/__init__.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import ops from ops import * import nn diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index 4b363ecbe78af82733fe1f80e44118a0dfda1f11..e72b22c83f65789b9e5d81611bec602d8d78be6b 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from ..layer_helper import LayerHelper, unique_name from ..framework import Program, Variable, Operator from .. 
import core @@ -117,7 +130,8 @@ def Print(input, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, - print_tensor_lod=True): + print_tensor_lod=True, + print_phase='both'): ''' **Print operator** @@ -128,18 +142,21 @@ def Print(input, tensor `t`. Args: - input(Variable): A Tensor to print. - summarize(int): Print this number of elements in the tensor, will print all - if left negative. - message(str): A string message to print as a prefix. - first_n(int): Only log `first_n` number of times. - print_tensor_name(bool): Print the tensor name. - print_tensor_type(bool): Print the tensor type. - print_tensor_shape(bool): Print the tensor shape. - print_tensor_lod(bool): Print the tensor lod. + input (Variable): A Tensor to print. + summarize (int): Print this number of elements in the tensor, will print + all if left is negative. + message (str): A string message to print as a prefix. + first_n (int): Only log `first_n` number of times. + print_tensor_name (bool): Print the tensor name. + print_tensor_type (bool): Print the tensor type. + print_tensor_shape (bool): Print the tensor shape. + print_tensor_lod (bool): Print the tensor lod. + print_phase (bool): Which phase to displace, including 'forward', + 'backward' and 'both'. If set to 'backward' or 'both', will + print the gradients of input tensor. Returns: - None + Variable: Output tensor, same data with input tensor. Examples: .. code-block:: python @@ -149,10 +166,10 @@ def Print(input, message="The content of some_layer: ") ''' helper = LayerHelper('print', **locals()) - out = helper.create_tmp_variable(dtype='int32') + out = helper.create_tmp_variable(dtype=helper.input_dtype()) helper.append_op( type='print', - inputs={'input': input}, + inputs={'In': input}, attrs={ 'first_n': first_n, 'summarize': summarize, @@ -161,7 +178,9 @@ def Print(input, 'print_tensor_type': print_tensor_type, 'print_tensor_shape': print_tensor_shape, 'print_tensor_lod': print_tensor_lod, - }) + 'print_phase': print_phase.upper() + }, + outputs={'Out': out}) return out @@ -1220,7 +1239,8 @@ class DynamicRNN(object): self.lod_rank_table = None self.max_seq_len = None self.step_idx = None - self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64') + self.zero_idx = fill_constant( + shape=[1], value=0, dtype='int64', force_cpu=True) self.mem_dict = dict() self.output_array = [] self.outputs = [] @@ -1271,11 +1291,32 @@ class DynamicRNN(object): outputs={'Out': input_array}) return array_read(array=input_array, i=self.step_idx) + def static_input(self, x): + self._assert_in_rnn_block_("static_input") + if not isinstance(x, Variable): + raise TypeError( + "static_input() can only take a Variable as its input") + if self.lod_rank_table is None: + raise RuntimeError( + "static_input() must be called after step_input().") + parent_block = self._parent_block_() + x_reordered = parent_block.create_var( + name=unique_name("dynamic_rnn_static_input_reordered"), + type=core.VarDesc.VarType.LOD_TENSOR, + dtype=x.dtype) + parent_block.append_op( + type='reorder_lod_tensor_by_rank', + inputs={'X': [x], + 'RankTable': [self.lod_rank_table]}, + outputs={'Out': [x_reordered]}) + return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table) + @contextlib.contextmanager def block(self): if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") - self.step_idx = fill_constant(shape=[1], dtype='int64', value=0) + self.step_idx = fill_constant( + shape=[1], dtype='int64', value=0, force_cpu=True) 
self.step_idx.stop_gradient = False self.status = DynamicRNN.IN_RNN with self.while_op.block(): @@ -1302,20 +1343,44 @@ class DynamicRNN(object): else: return self.outputs - def memory(self, init=None, shape=None, value=0.0, dtype='float32'): + def memory(self, + init=None, + shape=None, + value=0.0, + need_reorder=False, + dtype='float32'): self._assert_in_rnn_block_('memory') if init is not None: if not isinstance(init, Variable): raise TypeError( "The input arg `init` of memory() must be a Variable") parent_block = self._parent_block_() + init_tensor = init + if need_reorder == True: + if self.lod_rank_table is None: + raise ValueError( + 'If set need_reorder to True, make sure step_input be ' + 'invoked before ' + 'memory(init=init, need_reordered=True, ...).') + init_reordered = parent_block.create_var( + name=unique_name('dynamic_rnn_mem_init_reordered'), + type=core.VarDesc.VarType.LOD_TENSOR, + dtype=init.dtype) + parent_block.append_op( + type='reorder_lod_tensor_by_rank', + inputs={ + 'X': [init_tensor], + 'RankTable': [self.lod_rank_table] + }, + outputs={'Out': [init_reordered]}) + init_tensor = init_reordered mem_array = parent_block.create_var( name=unique_name('dynamic_rnn_mem_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, dtype=init.dtype) parent_block.append_op( type='write_to_array', - inputs={'X': init, + inputs={'X': init_tensor, 'I': self.zero_idx}, outputs={'Out': mem_array}) retv = array_read(array=mem_array, i=self.step_idx) diff --git a/python/paddle/v2/fluid/layers/device.py b/python/paddle/v2/fluid/layers/device.py index 775d40e5b5ef0cbb0b62bdc0678f2368b7b1a59a..ef74b2b2f08ae446b612a5e5552344b3901d8178 100644 --- a/python/paddle/v2/fluid/layers/device.py +++ b/python/paddle/v2/fluid/layers/device.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ All util layers. """ diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py index 56c3f7b7b7f174338bb56bc5785423ca634650a6..a43e0ee4def668bf7033f37cfa1a3f59d10a88d0 100644 --- a/python/paddle/v2/fluid/layers/io.py +++ b/python/paddle/v2/fluid/layers/io.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from .. 
import core from ..layer_helper import LayerHelper @@ -15,9 +28,9 @@ def data(name, **Data Layer** This function takes in the input and based on whether data has - to be returned back as a minibatch, it creates the global variable using + to be returned back as a minibatch, it creates the global variable by using the helper functions. The global variables can be accessed by all the - following operations and layers in the graph. + following operators in the graph. All the input variables of this function are passed in as local variables to the LayerHelper constructor. diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 251a1535d87b59001f7a089f41f7a0ce070d0c15..fc4c22e1526be3cbdf683c67649dd369d09114bf 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ All layers just related to the neural network. """ @@ -36,6 +49,9 @@ __all__ = [ 'sequence_first_step', 'sequence_last_step', 'dropout', + 'split', + 'l2_normalize', + 'matmul', ] @@ -940,7 +956,8 @@ def pool2d(input, pool_stride=None, pool_padding=None, global_pooling=False, - use_cudnn=True): + use_cudnn=True, + name=None): """ This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters. @@ -989,7 +1006,8 @@ def batch_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None, - data_layout='NCHW'): + data_layout='NCHW', + name=None): """ This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters. @@ -1065,7 +1083,7 @@ def batch_norm(input, return helper.append_activation(batch_norm_out) -def beam_search_decode(ids, scores): +def beam_search_decode(ids, scores, name=None): helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) @@ -1090,7 +1108,8 @@ def conv2d_transpose(input, stride=None, dilation=None, param_attr=None, - use_cudnn=True): + use_cudnn=True, + name=None): """ The transpose of conv2d layer. @@ -1119,6 +1138,8 @@ def conv2d_transpose(input, param_attr: Parameter Attribute. use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: Output image. @@ -1186,7 +1207,7 @@ def conv2d_transpose(input, return out -def sequence_expand(x, y): +def sequence_expand(x, y, name=None): """Sequence Expand Layer. This layer will expand the input variable **x** according to LoD information of **y**. And the following examples will explain how sequence_expand works: @@ -1230,6 +1251,8 @@ def sequence_expand(x, y): Args: x (Variable): The input variable which is a Tensor or LoDTensor. 
y (Variable): The input variable which is a LoDTensor. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The expanded variable which is a LoDTensor. @@ -1256,7 +1279,8 @@ def lstm_unit(x_t, cell_t_prev, forget_bias=0.0, param_attr=None, - bias_attr=None): + bias_attr=None, + name=None): """Lstm unit layer. The equation of a lstm step is: .. math:: @@ -1303,6 +1327,8 @@ def lstm_unit(x_t, initializer, name etc. bias_attr (ParamAttr): The attributes of bias weights, if not False, bias weights will be created and be set to default value. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: tuple: The hidden value and cell value of lstm unit. @@ -1368,7 +1394,7 @@ def lstm_unit(x_t, return h, c -def reduce_sum(input, dim=None, keep_dim=False): +def reduce_sum(input, dim=None, keep_dim=False, name=None): """ Computes the sum of tensor elements over the given dimension. @@ -1382,6 +1408,8 @@ def reduce_sum(input, dim=None, keep_dim=False): keep_dim (bool): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The reduced Tensor variable. @@ -1412,7 +1440,7 @@ def reduce_sum(input, dim=None, keep_dim=False): return out -def reduce_mean(input, dim=None, keep_dim=False): +def reduce_mean(input, dim=None, keep_dim=False, name=None): """ Computes the mean of tensor elements over the given dimension. @@ -1426,6 +1454,8 @@ def reduce_mean(input, dim=None, keep_dim=False): keep_dim (bool): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The reduced Tensor variable. @@ -1456,7 +1486,7 @@ def reduce_mean(input, dim=None, keep_dim=False): return out -def reduce_max(input, dim=None, keep_dim=False): +def reduce_max(input, dim=None, keep_dim=False, name=None): """ Computes the maximum of tensor elements over the given dimension. @@ -1470,6 +1500,8 @@ def reduce_max(input, dim=None, keep_dim=False): keep_dim (bool): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The reduced Tensor variable. @@ -1500,7 +1532,7 @@ def reduce_max(input, dim=None, keep_dim=False): return out -def reduce_min(input, dim=None, keep_dim=False): +def reduce_min(input, dim=None, keep_dim=False, name=None): """ Computes the minimum of tensor elements over the given dimension. @@ -1514,6 +1546,8 @@ def reduce_min(input, dim=None, keep_dim=False): keep_dim (bool): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The reduced Tensor variable. 
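For the layers that gain an optional name argument in this change, usage stays the same; the snippet below (with an assumed data variable x) only contrasts the two styles, letting the framework auto-name the layer versus passing an explicit label.

import paddle.v2.fluid as fluid

x = fluid.layers.data(name="x", shape=[13, 17], dtype="float32")
# name=None (the default): the framework generates a unique name.
sum_auto = fluid.layers.reduce_sum(x, dim=1, keep_dim=True)
# An explicit name is meant as a human-readable label for debugging or
# reading the program desc; it does not change the computation.
sum_named = fluid.layers.reduce_sum(x, dim=1, keep_dim=True, name="row_sum")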
@@ -1542,3 +1576,215 @@ def reduce_min(input, dim=None, keep_dim=False): 'reduce_all': True if dim == None else False }) return out + + +def split(input, num_or_sections, dim=-1, name=None): + """ + Split the input tensor into multiple sub-tensors. + + Args: + input (Variable): The input variable which is a Tensor or LoDTensor. + num_or_sections (int|list): If :attr:`num_or_sections` is an integer, + then the integer indicates the number of equal sized sub-tensors + that the tensor will be divided into. If :attr:`num_or_sections` + is a list of integers, the length of list indicates the number of + sub-tensors and the integers indicate the sizes of sub-tensors' + :attr:`dim` dimension orderly. + dim (int): The dimension along which to split. If :math:`dim < 0`, the + dimension to split along is :math:`rank(input) + dim`. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + List: The list of segmented tensor variables. + + Examples: + .. code-block:: python + + # x is a Tensor variable with shape [3, 9, 5]: + x0, x1, x2 = fluid.layers.split(x, num_or_sections=3, dim=1) + x0.shape # [3, 3, 5] + x1.shape # [3, 3, 5] + x2.shape # [3, 3, 5] + x0, x1, x2 = fluid.layers.split(x, num_or_sections=[2, 3, 4], dim=1) + x0.shape # [3, 2, 5] + x1.shape # [3, 3, 5] + x2.shape # [3, 4, 5] + """ + helper = LayerHelper('split', **locals()) + input_shape = input.shape + dim = (len(input_shape) + dim) if dim < 0 else dim + if isinstance(num_or_sections, int): + assert num_or_sections > 1, 'num_or_sections must be more than 1.' + num = num_or_sections + else: + assert len(num_or_sections) < input_shape[ + dim], 'len(num_or_sections) must not be more than input.shape[dim].' + num = len(num_or_sections) + outs = [ + helper.create_tmp_variable(dtype=helper.input_dtype()) + for i in range(num) + ] + helper.append_op( + type='split', + inputs={'X': input}, + outputs={'Out': outs}, + attrs={ + 'num': num_or_sections if isinstance(num_or_sections, int) else 0, + 'sections': num_or_sections + if isinstance(num_or_sections, list) else [], + 'axis': dim + }) + return outs + + +def l2_normalize(x, axis, epsilon=1e-12, name=None): + """ + **L2 normalize Layer** + + The l2 normalize layer normalizes `x` along dimension `axis` using an L2 + norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes + + output = x / sqrt(max(sum(x**2), epsilon)) + + For `x` with more dimensions, this layer independently normalizes each 1-D + slice along dimension `axis`. + + Args: + x(Variable|list): The input tensor to l2_normalize layer. + axis(int): Dimension along which to normalize the input. + epsilon(float): A lower bound value for `x`'s l2 norm. sqrt(epsilon) will + be used as the divisor if the l2 norm of `x` is less than + sqrt(epsilon). + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + + Returns: + Variable: The output tensor variable. + + Examples: + .. 
code-block:: python + + data = fluid.layers.data(name="data", + shape=(3, 17, 13), + dtype="float32") + fc = fluid.layers.l2_normalize(x=data, axis=1) + """ + + if len(x.shape) == 1: axis = 0 + + helper = LayerHelper("l2_normalize", **locals()) + + square = helper.create_tmp_variable(dtype=x.dtype) + helper.append_op(type="square", inputs={"X": x}, outputs={"Out": square}) + + reduced_sum = helper.create_tmp_variable(dtype=x.dtype) + helper.append_op( + type="reduce_sum", + inputs={"X": square}, + outputs={"Out": reduced_sum}, + attrs={ + "dim": 1 if axis is None else axis, + "keep_dim": True, + "reduce_all": False + }) + + # TODO(caoying) A lower bound value epsilon for the norm is needed to + # imporve the numeric stability of reciprocal. This requires a maximum_op. + rsquare = helper.create_tmp_variable(dtype=x.dtype) + helper.append_op( + type="reciprocal", inputs={"X": reduced_sum}, outputs={"Out": rsquare}) + + # TODO(caoying) the current elementwise_mul operator does not support a + # general broadcast rule which broadcasts input(Y) to have the same + # dimension with Input(X) starting from a specified dimension. So this + # exanpsion is requred. Once a general broadcast rule is spported, this + # expanding canbe removed. + rsquare_expanded = helper.create_tmp_variable(dtype=x.dtype) + expand_times = [1] * len(x.shape) + expand_times[axis] = int(x.shape[axis]) + helper.append_op( + type="expand", + inputs={"X": rsquare}, + outputs={"Out": rsquare_expanded}, + attrs={"expand_times": expand_times}) + + out = helper.create_tmp_variable(dtype=x.dtype) + helper.append_op( + type="elementwise_mul", + inputs={"X": x, + "Y": rsquare_expanded}, + outputs={"Out": out}) + return out + + +def matmul(x, y, transpose_x=False, transpose_y=False, name=None): + """ + Applies matrix multipication to two tensors. Currently only rank 1 to rank + 3 input tensors are supported. + + The actual behavior depends on the shapes of :math:`x`, :math:`y` and the + flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: + + - If a transpose flag is specified, the last two dimensions of the tensor + are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for + :math:`x` it is treated as :math:`[1, D]` in nontransposed form and as + :math:`[D, 1]` in transposed form, whereas for :math:`y` it is the + opposite: It is treated as :math:`[D, 1]` in nontransposed form and as + :math:`[1, D]` in transposed form. + + - After transpose, the two tensors are 2-D or 3-D and matrix multipication + performs in the following way. + + - If both are 2-D, they are multiplied like conventional matrices. + - If either is 3-D, it is treated as a stack of matrices residing in the + last two dimensions and a batched matrix multiply supporting broadcast + applies on the two tensors. + + Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and + nontransposed, the prepended or appended dimension :math:`1` will be + removed after matrix multipication. + + Args: + x (Variable): The input variable which is a Tensor or LoDTensor. + y (Variable): The input variable which is a Tensor or LoDTensor. + transpose_x (bool): Whether to transpose :math:`x` before multiplication. + transpose_y (bool): Whether to transpose :math:`y` before multiplication. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The product Tensor variable. + + Examples: + .. 
code-block:: python + + # Examples to clarify shapes of the inputs and output + # x: [B, M, K], y: [B, K, N] + fluid.layers.matmul(x, y) # out: [B, M, N] + # x: [B, M, K], y: [K, N] + fluid.layers.matmul(x, y) # out: [B, M, N] + # x: [B, M, K], y: [K] + fluid.layers.matmul(x, y) # out: [B, M] + # x: [M, K], y: [K, N] + fluid.layers.matmul(x, y) # out: [M, N] + # x: [K], y: [K] + fluid.layers.matmul(x, y) # out: [1] + # x: [M], y: [N] + + fluid.layers.matmul(x, y, True, True) # out: [M, N] + """ + helper = LayerHelper('matmul', **locals()) + assert max( + len(x.shape), len(y.shape) + ) <= 3, 'Currently only rank 1 to rank 3 input tensors are supported.' + out = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op( + type='matmul', + inputs={'X': x, + 'Y': y}, + outputs={'Out': out}, + attrs={'transpose_X': transpose_x, + 'transpose_Y': transpose_y}) + return out diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index 51a85dbbd3357fabc62fb5b43269fdf79da21bfb..21945edf0827e7a86ec2f8ce8f84c9093808c68b 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from ..registry import register_layer __activations__ = [ @@ -42,6 +55,8 @@ __all__ = [ 'elementwise_div', 'elementwise_sub', 'elementwise_mul', + 'elementwise_max', + 'elementwise_min', 'clip', 'sequence_softmax', ] + __activations__ diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 2608a8d1151fafa2da0cf7b605c4fa1210068057..255b9d467839a05447c8999c047d96769ae25f17 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
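As a reference for the documented l2_normalize formula above, here is a small numpy rendering; it follows the docstring's definition rather than the operator implementation, and the helper name is only illustrative.

import numpy as np

def l2_normalize_ref(x, axis, epsilon=1e-12):
    # output = x / sqrt(max(sum(x ** 2), epsilon)), applied independently to
    # every 1-D slice along `axis`, as the docstring describes.
    sq_sum = np.sum(np.square(x), axis=axis, keepdims=True)
    return x / np.sqrt(np.maximum(sq_sum, epsilon))

x = np.random.rand(3, 17, 13).astype("float32")
out = l2_normalize_ref(x, axis=1)
# each slice along axis 1 now has (approximately) unit L2 norm
print(np.allclose(np.linalg.norm(out, axis=1), 1.0, atol=1e-5))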
from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..framework import convert_np_dtype_to_dtype_ @@ -180,7 +193,7 @@ def assign(input, output): return output -def fill_constant(shape, dtype, value, out=None): +def fill_constant(shape, dtype, value, force_cpu=False, out=None): """ **fill_constant** @@ -211,9 +224,12 @@ def fill_constant(shape, dtype, value, out=None): type='fill_constant', inputs={}, outputs={'Out': [out]}, - attrs={'shape': shape, - 'dtype': out.dtype, - 'value': float(value)}) + attrs={ + 'shape': shape, + 'dtype': out.dtype, + 'value': float(value), + 'force_cpu': force_cpu + }) out.stop_gradient = True return out diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 293b116957ff9a7c02417bc268b4c0b4b2fc0a15..89ffe26ed1a70942c4994394f9a3635dda13be69 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from collections import defaultdict import framework from framework import Program, default_main_program, Parameter, Variable diff --git a/python/paddle/v2/fluid/net_drawer.py b/python/paddle/v2/fluid/net_drawer.py index 94fdd5e38970b309580de6fc934b158a3c46e464..7448975b59ba13bb29cd0a1bea043add251844e6 100644 --- a/python/paddle/v2/fluid/net_drawer.py +++ b/python/paddle/v2/fluid/net_drawer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import argparse import json import logging diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index 440467e0abc9b74c2a80f68b44d9abd1a9a58991..c53fbd280fd79dc0d598fda2b5796886abee4ed4 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -1,8 +1,23 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import layers __all__ = [ "simple_img_conv_pool", "sequence_conv_pool", + "glu", + "dot_product_attention", ] @@ -107,3 +122,90 @@ def sequence_conv_pool(input, pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type) return pool_out + + +def glu(input, dim=-1): + """ + The gated linear unit composed by split, sigmoid activation and elementwise + multiplication. Specifically, Split the input into two equal sized parts + :math:`a` and :math:`b` along the given dimension and then compute as + following: + + .. math:: + + {GLU}(a, b)= a \otimes \sigma(b) + + Refer to `Language Modeling with Gated Convolutional Networks + `_. + + Args: + input (Variable): The input variable which is a Tensor or LoDTensor. + dim (int): The dimension along which to split. If :math:`dim < 0`, the + dimension to split along is :math:`rank(input) + dim`. + + Returns: + Variable: The Tensor variable with half the size of input. + + Examples: + .. code-block:: python + + # x is a Tensor variable with shape [3, 6, 9] + fluid.nets.glu(input=x, dim=1) # shape of output: [3, 3, 9] + """ + + a, b = layers.split(input, num_or_sections=2, dim=dim) + act_b = layers.sigmoid(x=b) + out = layers.elementwise_mul(x=a, y=act_b) + return out + + +def dot_product_attention(querys, keys, values): + """ + The dot-product attention. + + Attention mechanism can be seen as mapping a query and a set of key-value + pairs to an output. The output is computed as a weighted sum of the values, + where the weight assigned to each value is computed by a compatibility + function (dot-product here) of the query with the corresponding key. + + The dot-product attention can be implemented through (batch) matrix + multipication as follows: + + .. math:: + + Attention(Q, K, V)= softmax(QK^\mathrm{T})V + + Refer to `Attention Is All You Need + `_. + + Note that batch data containing sequences with different lengths is not + supported by this because of the (batch) matrix multipication. + + Args: + query (Variable): The input variable which is a Tensor or LoDTensor. + key (Variable): The input variable which is a Tensor or LoDTensor. + value (Variable): The input variable which is a Tensor or LoDTensor. + + Returns: + tuple: The Tensor variables representing the output and attention scores. + + Examples: + .. code-block:: python + + # Suppose q, k, v are tensor variables with the following shape: + # q: [3, 5, 9], k: [3, 6, 9], v: [3, 6, 10] + out, attn_scores = fluid.nets.dot_product_attention(q, k, v) + out.shape # [3, 5, 10] + attn_scores.shape # [3, 5, 6] + """ + assert keys.shape[-2] == values.shape[ + -2], 'The shapes of keys and values mismatch.' + assert querys.shape[-1] == keys.shape[ + -1], 'The shapes of querys and keys mismatch.' + product = layers.matmul(x=querys, y=keys, transpose_y=True) + attn_scores = layers.reshape( + x=layers.reshape( + x=product, shape=[-1, product.shape[-1]], act='softmax'), + shape=product.shape) + out = layers.matmul(attn_scores, values) + return out, attn_scores diff --git a/python/paddle/v2/fluid/op.py b/python/paddle/v2/fluid/op.py index 5828803497ec06bc7644da18ca752f61469ca53f..4bc0f79c64876829d601ac06b9ca451a15300fe8 100644 --- a/python/paddle/v2/fluid/op.py +++ b/python/paddle/v2/fluid/op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
diff --git a/python/paddle/v2/fluid/op.py b/python/paddle/v2/fluid/op.py
index 5828803497ec06bc7644da18ca752f61469ca53f..4bc0f79c64876829d601ac06b9ca451a15300fe8 100644
--- a/python/paddle/v2/fluid/op.py
+++ b/python/paddle/v2/fluid/op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import paddle.v2.fluid.core as core
 import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
 
diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py
index 40721b5e97a3ab2b6fe772635454105f5cdf7b6c..8bd62ef0c02b1cae1dca8d6f1414286dc643794b 100644
--- a/python/paddle/v2/fluid/optimizer.py
+++ b/python/paddle/v2/fluid/optimizer.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 from collections import defaultdict
 
 import framework
diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py
index ab4561b0423dd73c8c0d529cbf34b52876b1b77c..3af0190590e775d4816410e4dbe0069868ea209c 100644
--- a/python/paddle/v2/fluid/param_attr.py
+++ b/python/paddle/v2/fluid/param_attr.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 from initializer import Initializer, Xavier, Constant
 from regularizer import WeightDecayRegularizer
 
diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py
index dcecd76224e70d03ed987a5bb104a977a527d218..f049498b9ffbfc915d03673c1d009d0ab5e4f8fc 100644
--- a/python/paddle/v2/fluid/profiler.py
+++ b/python/paddle/v2/fluid/profiler.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
import paddle.v2.fluid.core as core from contextlib import contextmanager import os diff --git a/python/paddle/v2/fluid/registry.py b/python/paddle/v2/fluid/registry.py index 94b16bca8c95e7d76377b1cd6e60532069fb452f..6c0c3a35185391873fe5bb98d1ed5ee1cf13aa15 100644 --- a/python/paddle/v2/fluid/registry.py +++ b/python/paddle/v2/fluid/registry.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import re import cStringIO import warnings diff --git a/python/paddle/v2/fluid/regularizer.py b/python/paddle/v2/fluid/regularizer.py index 117c45c49f14ab53db5a3a7b8360ba173cc87bf1..e53dee98fd025df8aa5ff2b74d3bdfd901402965 100644 --- a/python/paddle/v2/fluid/regularizer.py +++ b/python/paddle/v2/fluid/regularizer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import framework __all__ = [ diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt index e795627bfe9e8ad0c196349a332e62e975f20aa3..9a0240cbf65c7a79e29babc2abcb157ada684c5e 100644 --- a/python/paddle/v2/fluid/tests/CMakeLists.txt +++ b/python/paddle/v2/fluid/tests/CMakeLists.txt @@ -5,3 +5,4 @@ foreach(src ${TEST_OPS}) endforeach() add_subdirectory(book) +add_subdirectory(book_distribute) diff --git a/python/paddle/v2/fluid/tests/__init__.py b/python/paddle/v2/fluid/tests/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2619c1c0e9db17c38ccc6e1dd010bd9c1c5966bd 100644 --- a/python/paddle/v2/fluid/tests/__init__.py +++ b/python/paddle/v2/fluid/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index fbf46ac6cba8fa4981cc8a6e8f5434a510c52d7d..904df66dc1869ca4069a3f2e8dbce850c08e4253 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index 3d336ffe9582ddd9a2031e7aa7e2c26a772820f8..a06486aa08733a589ac9f0c7b65bb8e769eedcb1 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import sys diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index 74ca56182c47de2e74e80a56bf84dcf90ca6c104..42971da0f042e970f7f657e4fd3a66ad7ecf0dc1 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import math import numpy as np diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py index e79864b3977ed8111903f9497685ee7ebf76e1da..deeb6b1badc9e12caf4c95f949c3c622b04cf8b4 100644 --- a/python/paddle/v2/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py index 35bf8da924dc76475df9bd5e6a4c04f4d204426a..1d5defbed332e8c4c989e6c1f236836bb4b0a3f9 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import numpy as np import paddle.v2 as paddle diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py index 51bfe2973db7bd2ec4b43bb588be4c1fcfb11e74..02da2fcc8544d0f3ccfefa3c88af2f7297d1c76f 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from __future__ import print_function import numpy as np import paddle.v2 as paddle diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index e3cc2a89371233014dec4ba3d730a866722d3eae..47e2afcd83be12c37d95574afff7ccd2e8a781a6 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py index f103358edca9bbd2e28c99afd249f97b1d8069ae..b44d2b41e36633ed8feb7dd8c0e3884e48ffe917 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import numpy as np import paddle.v2 as paddle diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py index cd28f04b8574778316d70e7d8a03026f807c3e52..5a139c1dcd41305aa5aece96f6b2aabde0235b95 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py index 633de66bea2af7404ab0d325b425e7b9e63d3e43..fab8a82f85ddeed7131df3777e978cc7c0a1b86e 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 8b928ff9eed41f8945c749058b4177fd023452ba..3d4bbccd33d7d9d0f492a76e04b98d8f7efac91d 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt b/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d7664469e481344cf9eea84688f068b4fb99dee --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt @@ -0,0 +1,5 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_dist_fit_a_line.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py similarity index 77% rename from python/paddle/v2/fluid/tests/book_distribute/test_dist_fit_a_line.py rename to python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py index bb339c440bd0d229d2ae348cf5a7745b16d156d5..b886071f94768d3373bbf0e0b7655c924b218645 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/test_dist_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_dist_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py similarity index 92% rename from python/paddle/v2/fluid/tests/book_distribute/test_dist_label_semantic_roles.py rename to python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 5fa5e0e5f34e6904e0e66d3ab4149cdfcffeb244..2b5a098ff253b8a96afba7cd03d7f9998ff400af 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/test_dist_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import math import numpy as np diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py similarity index 85% rename from python/paddle/v2/fluid/tests/book_distribute/test_dist_word2vec.py rename to python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py index b41853784d607c566fc596ab93f2282520778a4b..dc04af5b7b6ee143847685d6cf4da91747afd3ec 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/test_dist_word2vec.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from __future__ import print_function import numpy as np import paddle.v2 as paddle diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index 20b4a8b34cd085ae51e6169f0d4eac58b7f3ffb2..27512c4f7812b6b55d5dc6d1a12c3b83df8b3e6f 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import numpy as np import paddle.v2 as paddle diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_understand_sentiment_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py similarity index 86% rename from python/paddle/v2/fluid/tests/book_distribute/test_understand_sentiment_conv_dist.py rename to python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index db419e23abcd06ca39011b1bef078b0cafb5100e..74f20f3f4cc8c81a38c1ad8ab33df6a07fbcad44 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/test_understand_sentiment_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import os import numpy as np diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py new file mode 100644 index 0000000000000000000000000000000000000000..f979f642d8f8cf5869cd74d6f89d1d01f5860504 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py @@ -0,0 +1,52 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and
+#limitations under the License.
+import math
+import unittest
+from paddle.v2.fluid.distribute_transpiler import split_dense_variable
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.core as core
+import random
+
+
+class TestSplitVar(unittest.TestCase):
+    def test_check_output(self):
+        # split the shapes below across 10 parameter servers
+        shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]]
+        expected_sizes = [
+            [15], [1024],
+            [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784],
+            [2040, 2040, 2040, 2040],
+            [1150, 1150, 1150, 1150, 1150, 1150, 1100]
+        ]
+        var_list = []
+        program = fluid.Program()
+        for shape in shapes:
+            var = program.global_block().create_var(
+                name=str(random.randint(10000, 99999)),
+                persistable=True,
+                # dtype=core.VarDesc.VarType.LOD_TENSOR,
+                shape=shape)
+            var_list.append(var)
+        blocks = split_dense_variable(var_list, 10)
+        all_sizes = []
+        for s in expected_sizes:
+            for s2 in s:
+                all_sizes.append(s2)
+        for i, block_str in enumerate(blocks):
+            varname, block_id, size = block_str.split(":")
+            self.assertEqual(int(size), all_sizes[i])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/decorators.py
index 154619b0e93455922700a12d734967b4d20c4f13..3b314a15e1b054ece50ad5d697c5fac3bbfedbdc 100644
--- a/python/paddle/v2/fluid/tests/decorators.py
+++ b/python/paddle/v2/fluid/tests/decorators.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import paddle.v2.fluid as fluid
 
 __all__ = ['many_times', 'prog_scope']
diff --git a/python/paddle/v2/fluid/tests/demo/fc_gan.py b/python/paddle/v2/fluid/tests/demo/fc_gan.py
index cae959593e855f11c04585341d86478b649d17c9..5f9e8f950779be214a5fd18c4b9d3a0c3f74282b 100644
--- a/python/paddle/v2/fluid/tests/demo/fc_gan.py
+++ b/python/paddle/v2/fluid/tests/demo/fc_gan.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import errno
 import math
 import os
diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py
index 276cf2c5f2daa711f61158107f7d6539e676ef20..c3b2220e6e2ca6285e4b1193620e7c560b1f7bfa 100644
--- a/python/paddle/v2/fluid/tests/op_test.py
+++ b/python/paddle/v2/fluid/tests/op_test.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
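The expected_sizes table in test_split_var.py above follows from how evenly each variable can be divided across 10 parameter servers. The sketch below reproduces those numbers under two assumptions inferred from the table rather than taken from distribute_transpiler: a block is never smaller than 1024 elements, and the block size is rounded up so every block holds whole rows (a multiple of the trailing dimensions):

.. code-block:: python

    from __future__ import print_function
    import math

    def split_sizes(shape, pservers=10, min_block_size=1024):
        # Assumed rule: use at most `pservers` blocks, never make a block
        # smaller than `min_block_size` elements, and round the block size up
        # to a multiple of the trailing dimensions.
        numel = 1
        for d in shape:
            numel *= d
        split_count = min(pservers, max(1, numel // min_block_size))
        block = int(math.ceil(numel / float(split_count)))
        row = 1
        for d in shape[1:]:
            row *= d
        if row > 1 and block % row != 0:
            block = (block // row + 1) * row
        sizes = []
        remaining = numel
        while remaining > 0:
            sizes.append(min(block, remaining))
            remaining -= block
        return sizes

    # Reproduces the expected_sizes above, e.g.
    # [28, 784] -> nine blocks of 2352 plus a final 784,
    # [800, 10] -> six blocks of 1150 plus a final 1100.
    for shape in [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]]:
        print(shape, split_sizes(shape))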
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import random diff --git a/python/paddle/v2/fluid/tests/test_accuracy_op.py b/python/paddle/v2/fluid/tests/test_accuracy_op.py index 6f72918b7178bc1f856010f1111f18842f6cc34a..a20abac8a0ce689e1c49f7f7e082137d2cb3fbb4 100644 --- a/python/paddle/v2/fluid/tests/test_accuracy_op.py +++ b/python/paddle/v2/fluid/tests/test_accuracy_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py index 03eb7deb9a35933e5a1676a262a371c69151e6d1..a6a6eb9d635ed3ece0a0f22d629955b06098e321 100644 --- a/python/paddle/v2/fluid/tests/test_activation_op.py +++ b/python/paddle/v2/fluid/tests/test_activation_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_adadelta_op.py b/python/paddle/v2/fluid/tests/test_adadelta_op.py index 7105593a98aee9885ba16e3ee0649a6024033ee7..8de6a1f9a9da4834d7b52baade12bb15ef128cad 100644 --- a/python/paddle/v2/fluid/tests/test_adadelta_op.py +++ b/python/paddle/v2/fluid/tests/test_adadelta_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/test_adagrad_op.py index 7b2d02fbf4256d2c27383a3452d526271af543a3..30ed092d4894f5c28775748a71f474c46a1cb2d3 100644 --- a/python/paddle/v2/fluid/tests/test_adagrad_op.py +++ b/python/paddle/v2/fluid/tests/test_adagrad_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py index 7dbc2fa0858a68c5da9e8d48dcb187494357e940..32d00cf702e7fdca2b9f975dcaf93d721fe6cce1 100644 --- a/python/paddle/v2/fluid/tests/test_adam_op.py +++ b/python/paddle/v2/fluid/tests/test_adam_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_adamax_op.py b/python/paddle/v2/fluid/tests/test_adamax_op.py index 8e5a15aa3d12bbaae99cae6fcb627a336e48f684..35b2bc47ed62cb21b6e58a172e5b7e4d34f52eb4 100644 --- a/python/paddle/v2/fluid/tests/test_adamax_op.py +++ b/python/paddle/v2/fluid/tests/test_adamax_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/test_array_read_write_op.py index 01321de8eac34d562d99726b1f4125d1932ab40f..8775cd4f9fb93e439920e29df4b525485254754c 100644 --- a/python/paddle/v2/fluid/tests/test_array_read_write_op.py +++ b/python/paddle/v2/fluid/tests/test_array_read_write_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.core as core import paddle.v2.fluid.layers as layers diff --git a/python/paddle/v2/fluid/tests/test_assign_op.py b/python/paddle/v2/fluid/tests/test_assign_op.py index 1b0c145f1a69678b228bc70e4e4e273f5bcf9888..4ac173c96bd2c02fd0704d8e2c22faa96b65714e 100644 --- a/python/paddle/v2/fluid/tests/test_assign_op.py +++ b/python/paddle/v2/fluid/tests/test_assign_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import op_test import numpy import unittest diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/test_assign_value_op.py index 51b99d091825ab3edc2175202ae5d8a364a54378..f4e2ff9bdeb88cc1fa19055a8e1ff4e6156f0477 100644 --- a/python/paddle/v2/fluid/tests/test_assign_value_op.py +++ b/python/paddle/v2/fluid/tests/test_assign_value_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid as fluid import paddle.v2.fluid.layers as layers import op_test diff --git a/python/paddle/v2/fluid/tests/test_auc_op.py b/python/paddle/v2/fluid/tests/test_auc_op.py index 26ea905d88093605dff820b178996a5724becf82..aa74d224d5dcdcce4c478df8f85fb45d05d87f32 100644 --- a/python/paddle/v2/fluid/tests/test_auc_op.py +++ b/python/paddle/v2/fluid/tests/test_auc_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py index ac9418549f45f818257d74045cabb9c581816968..fe82b7d7f31aa58d38bddba72e64d471c0a2eec0 100644 --- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py index f329214dce407fe0382c51b29f0f4c33b562541a..9ef6e08cc189035c55d52ecaf209d5d607de0ed0 100644 --- a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py +++ b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_beam_search_op.py b/python/paddle/v2/fluid/tests/test_beam_search_op.py index 319a7e49e35b0515e69703b2d03080cd9ffcae9d..f31c737ba6cb7d17405e96506834627c9c5761b4 100644 --- a/python/paddle/v2/fluid/tests/test_beam_search_op.py +++ b/python/paddle/v2/fluid/tests/test_beam_search_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import logging from paddle.v2.fluid.op import Operator, DynamicRecurrentOp import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py index 080ca43b8269e0f6a9f4d0ce3973f4d4a07a8e2a..aed1bf4d3ae867dafdefe67d36bb350453e3ede0 100644 --- a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py +++ b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_calc_gradient.py b/python/paddle/v2/fluid/tests/test_calc_gradient.py index c34c8ff6d143ff2c8ae0def935d2b44982c0764e..b99eeb09cdbb7c2174ea5f29ba254ce342517f9c 100644 --- a/python/paddle/v2/fluid/tests/test_calc_gradient.py +++ b/python/paddle/v2/fluid/tests/test_calc_gradient.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/test_cast_op.py b/python/paddle/v2/fluid/tests/test_cast_op.py index 4e431bb88da6070718d64a68467be20ca87f8fb9..3795b96dbf0f8f50ede2aeda7262ba61c095d6af 100644 --- a/python/paddle/v2/fluid/tests/test_cast_op.py +++ b/python/paddle/v2/fluid/tests/test_cast_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import op_test import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py index 53bf6f815b8c7baf4c92d9fd488b69722ab0bef5..59ef2bbb2feebc5930ff9513598e0ac55376635f 100644 --- a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py +++ b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_clip.py b/python/paddle/v2/fluid/tests/test_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..63353a10963532ec5b35eff22644adf4823243aa --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_clip.py @@ -0,0 +1,80 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
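The new test_clip.py below clones an MNIST MLP program, registers ErrorClipByValue on hidden1 in the clone only, and appends backward ops, passing error_clip_callback for the clipped program. What it asserts is that hidden1's gradient in the clipped program equals the unclipped gradient clamped elementwise to [CLIP_MIN, CLIP_MAX], while hidden2's gradient, computed earlier in the backward pass, is unchanged. A small numpy sketch of that property; the gradient values are made up for illustration:

.. code-block:: python

    import numpy as np

    CLIP_MAX = 2e-6
    CLIP_MIN = -1e-6

    # Stand-in for hidden1@GRAD fetched from the unclipped program.
    grad = np.array([-3e-6, -5e-7, 0.0, 1e-6, 4e-6])

    # What the program with ErrorClipByValue is expected to produce for the
    # same batch: values clamped into [CLIP_MIN, CLIP_MAX],
    # i.e. -1e-6, -5e-7, 0.0, 1e-6, 2e-6.
    clipped = np.clip(grad, CLIP_MIN, CLIP_MAX)

    # The same check the test performs, written with ndarray.clip.
    assert (grad.clip(min=CLIP_MIN, max=CLIP_MAX) == clipped).all()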
+from __future__ import print_function
+import numpy as np
+import paddle.v2 as paddle
+import paddle.v2.fluid as fluid
+
+BATCH_SIZE = 128
+CLIP_MAX = 2e-6
+CLIP_MIN = -1e-6
+
+prog = fluid.framework.Program()
+
+with fluid.program_guard(main_program=prog):
+    image = fluid.layers.data(name='x', shape=[784], dtype='float32')
+
+    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
+    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
+    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
+
+    label = fluid.layers.data(name='y', shape=[1], dtype='int64')
+
+    cost = fluid.layers.cross_entropy(input=predict, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+
+prog_clip = prog.clone()
+prog_clip.block(0).var(hidden1.name).set_error_clip(
+    fluid.clip.ErrorClipByValue(
+        max=CLIP_MAX, min=CLIP_MIN))
+
+avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
+fluid.backward.append_backward(loss=avg_cost)
+fluid.backward.append_backward(
+    loss=avg_cost_clip, callback=fluid.clip.error_clip_callback)
+
+hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD")
+hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD")
+
+hidden2_grad = prog.block(0).var(hidden2.name + "@GRAD")
+hidden2_grad_clip = prog_clip.block(0).var(hidden2.name + "@GRAD")
+
+train_reader = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.mnist.train(), buf_size=8192),
+    batch_size=BATCH_SIZE)
+
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)
+feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
+exe.run(fluid.default_startup_program())
+
+count = 0
+for data in train_reader():
+    count += 1
+    if count > 5:
+        break
+    out1, out2 = exe.run(prog,
+                         feed=feeder.feed(data),
+                         fetch_list=[hidden1_grad, hidden2_grad])
+    out1_clip, out2_clip = exe.run(
+        prog_clip,
+        feed=feeder.feed(data),
+        fetch_list=[hidden1_grad_clip, hidden2_grad_clip])
+    if not ((out1.clip(
+            min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() and
+            (out2 == out2_clip).all()):
+        exit(1)
+
+exit(0)
diff --git a/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py b/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
index 02f6108a3a661b0e32cd2e7ed65cb4b8cb50c067..5147e75046294cb8fc7673f2e58a4fb8e50e0e53 100644
--- a/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_clip_op.py b/python/paddle/v2/fluid/tests/test_clip_op.py
index a7e1bf174408e4139db0435d9f4bb0c885f76705..3338dc61b38c7e46a048458ae81d7592e30bbffb 100644
--- a/python/paddle/v2/fluid/tests/test_clip_op.py
+++ b/python/paddle/v2/fluid/tests/test_clip_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_compare_op.py b/python/paddle/v2/fluid/tests/test_compare_op.py index 5d0dfab6ffd1cbbbfbcdb3af60f1868b7b780456..fbf8921e40563cca28ade38465f68eb133b263e4 100644 --- a/python/paddle/v2/fluid/tests/test_compare_op.py +++ b/python/paddle/v2/fluid/tests/test_compare_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import op_test import unittest import numpy diff --git a/python/paddle/v2/fluid/tests/test_concat_op.py b/python/paddle/v2/fluid/tests/test_concat_op.py index a792d1c106ac00efd92e680cfad67f41a7520e26..3e413e15404f64755fdf1e1db936ca34d61d2b03 100644 --- a/python/paddle/v2/fluid/tests/test_concat_op.py +++ b/python/paddle/v2/fluid/tests/test_concat_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_cond_op.py b/python/paddle/v2/fluid/tests/test_cond_op.py index 32e54084e48cf77c569db4dee54a0c89d5108373..5312fa51a253a7151107d508120cb590aa822364 100644 --- a/python/paddle/v2/fluid/tests/test_cond_op.py +++ b/python/paddle/v2/fluid/tests/test_cond_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import logging import paddle.v2.fluid.core as core import unittest diff --git a/python/paddle/v2/fluid/tests/test_conditional_block.py b/python/paddle/v2/fluid/tests/test_conditional_block.py index 7d815123f3454d1457f59202219f9a93bf3d8c31..965e7d39c807265d5884dc10cce228c5d7f5823f 100644 --- a/python/paddle/v2/fluid/tests/test_conditional_block.py +++ b/python/paddle/v2/fluid/tests/test_conditional_block.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_const_value.py b/python/paddle/v2/fluid/tests/test_const_value.py index f8c17c2c98674fa67458efa090e166e37f5a6a8a..190bfa779b4a2fac4d425ba9e45223d2aa04173b 100644 --- a/python/paddle/v2/fluid/tests/test_const_value.py +++ b/python/paddle/v2/fluid/tests/test_const_value.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.framework as framework diff --git a/python/paddle/v2/fluid/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/test_conv2d_op.py index e9a19d1774f843b94d3817d516880752fafd5628..8b03a3ae16592888119e1c9ea797e7bbe8acd324 100644 --- a/python/paddle/v2/fluid/tests/test_conv2d_op.py +++ b/python/paddle/v2/fluid/tests/test_conv2d_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py index 4aec32fc6e7540e3e3c788bbdc20abed147cbc93..b7b86c58fb81d44b153c3b6724d5ebd524db55e3 100644 --- a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_conv3d_op.py b/python/paddle/v2/fluid/tests/test_conv3d_op.py index df911e1a2f04501936fc332c7b4b829af248116e..5b0397cc690caccdc78317071a10eb451afda979 100644 --- a/python/paddle/v2/fluid/tests/test_conv3d_op.py +++ b/python/paddle/v2/fluid/tests/test_conv3d_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py index a42a9c4f33ffd5a8ee267fa910ef763301453a03..b08969062a8ccfec063d7bc1ae3af38f5776fce7 100644 --- a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_conv_shift_op.py b/python/paddle/v2/fluid/tests/test_conv_shift_op.py index b9ab21a06a1c6e8e2d1e936a0b4b8a07a59f57b9..14b2640e24cb8bd51111f14c187be73e423d20d2 100644 --- a/python/paddle/v2/fluid/tests/test_conv_shift_op.py +++ b/python/paddle/v2/fluid/tests/test_conv_shift_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_cos_sim_op.py b/python/paddle/v2/fluid/tests/test_cos_sim_op.py index 47557ccb41d1e835b5d04d1b94f54dfc7aa2855a..f6e5e2cbe9ed54d197b6ee640b4ffd1b3c101f99 100644 --- a/python/paddle/v2/fluid/tests/test_cos_sim_op.py +++ b/python/paddle/v2/fluid/tests/test_cos_sim_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py index 42b6f7a3616bbce53a8cae68a5fc1eda411a7422..6c922642210fd66604a66d1dd520fa5783c522c3 100644 --- a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py +++ b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.layers as layers diff --git a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py index ab573da31dfb9d7b40e44a79465a61cdc6b62a46..40e80a824a2fdd4ec2a6f2d96a2a6bf14ab74be0 100644 --- a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py +++ b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import random import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_crop_op.py b/python/paddle/v2/fluid/tests/test_crop_op.py index 62c883bdc130021d06c33ded9c2865505da0b719..a0b2fc954dda10ad264fecaf8827732b8820c71b 100644 --- a/python/paddle/v2/fluid/tests/test_crop_op.py +++ b/python/paddle/v2/fluid/tests/test_crop_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_cross_entropy_op.py index b81af9364d63bc9b242372e71f175ad047d7c240..f05e6b235656c75cb75c0ae6a8758ceb48a54352 100644 --- a/python/paddle/v2/fluid/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/fluid/tests/test_cross_entropy_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest, randomize_probability diff --git a/python/paddle/v2/fluid/tests/test_data_feeder.py b/python/paddle/v2/fluid/tests/test_data_feeder.py index 454969320321b72342803f507f0054f79f276669..5574766f8fad2272721f515311190c3dc41ace85 100644 --- a/python/paddle/v2/fluid/tests/test_data_feeder.py +++ b/python/paddle/v2/fluid/tests/test_data_feeder.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py b/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py index 674c3fda5c82309bbfbbad936a8b0b26929d42d9..5e745a284316e9be512178f909399ca4c7708901 100644 --- a/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py +++ b/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_default_scope_funcs.py b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py index 738e69529ea447e87516d5e0efc098910b966ded..7a62168be90514b933f0a8a58e754609ecaad646 100644 --- a/python/paddle/v2/fluid/tests/test_default_scope_funcs.py +++ b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.v2.fluid.default_scope_funcs import * import unittest diff --git a/python/paddle/v2/fluid/tests/test_detection_output_op.py b/python/paddle/v2/fluid/tests/test_detection_output_op.py index 080a9743b0182cb7e6dd0030fc306a7f82510a05..147a43628c671658a5d4338da99423eda851b195 100644 --- a/python/paddle/v2/fluid/tests/test_detection_output_op.py +++ b/python/paddle/v2/fluid/tests/test_detection_output_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/test_dropout_op.py index 2483200212686caf9c46f9c1129b5d8ffdcc9145..f401050dcc39d4bf786e33ef3e8a4c33e0250044 100644 --- a/python/paddle/v2/fluid/tests/test_dropout_op.py +++ b/python/paddle/v2/fluid/tests/test_dropout_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/test_dyn_rnn.py index 8090c5f47814c60034f2f46f00e56c530e0f2c19..a946fea58d67887cac40bd35b14e23752d6a1619 100644 --- a/python/paddle/v2/fluid/tests/test_dyn_rnn.py +++ b/python/paddle/v2/fluid/tests/test_dyn_rnn.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid as fluid import paddle.v2 as paddle import unittest diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index c02c59284e1ca2e28ba2f6c5ec13b241c15fc288..95cc80739d6940612d303a0033c74aa91a7465cc 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy import random import collections @@ -197,7 +210,24 @@ class BaseRNN(object): return numpy.array([o.mean() for o in outs.itervalues()]).mean() -class TestSimpleMul(unittest.TestCase): +class SeedFixedTestCase(unittest.TestCase): + @classmethod + def setUpClass(cls): + """Fix random seeds to remove randomness from tests""" + cls._np_rand_state = numpy.random.get_state() + cls._py_rand_state = random.getstate() + + numpy.random.seed(123) + random.seed(124) + + @classmethod + def tearDownClass(cls): + """Restore random seeds""" + numpy.random.set_state(cls._np_rand_state) + random.setstate(cls._py_rand_state) + + +class TestSimpleMul(SeedFixedTestCase): DATA_NAME = 'X' DATA_WIDTH = 32 PARAM_NAME = 'W' @@ -263,7 +293,7 @@ class TestSimpleMul(unittest.TestCase): self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.05)) -class TestSimpleMulWithMemory(unittest.TestCase): +class TestSimpleMulWithMemory(SeedFixedTestCase): DATA_WIDTH = 32 HIDDEN_WIDTH = 20 DATA_NAME = 'X' diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py b/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py new file mode 100644 index 0000000000000000000000000000000000000000..d6878f0b6d07b74612dd47794e254e4d7d98a124 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py @@ -0,0 +1,205 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +import unittest +import paddle.v2 as paddle +import paddle.v2.fluid.core as core +import paddle.v2.fluid as fluid +from paddle.v2.fluid.backward import append_backward +import paddle.v2.fluid.framework as framework +from paddle.v2.fluid.framework import Program, switch_main_program +import bisect +import numpy as np + +fluid.default_startup_program().random_seed = 1 + + +class TestDyRnnStaticInput(unittest.TestCase): + def setUp(self): + self._delta = 0.005 + self._max_sequence_len = 3 + self._program = Program() + switch_main_program(self._program) + self.output_dim = 10 + self.place = core.CPUPlace() + self.prepare_x_tensor() + self.prepare_static_input_tensor() + self.exe = fluid.Executor(self.place) + + def prepare_x_tensor(self): + self.x_tensor_dim = 10 + lod = [[0, 2, 3, 6]] + shape = [lod[0][-1], self.x_tensor_dim] + self.x_tensor_data = np.random.random(shape).astype('float32') + self.x_tensor = core.LoDTensor() + self.x_tensor.set_lod(lod) + self.x_tensor.set(self.x_tensor_data, self.place) + + def prepare_static_input_tensor(self): + self.static_input_tensor_dim = 4 + lod = [[0, 1, 3, 6]] + shape = [lod[0][-1], self.static_input_tensor_dim] + self.static_input_data = np.random.random(shape).astype('float32') + self.static_input_tensor = core.LoDTensor() + self.static_input_tensor.set_lod(lod) + self.static_input_tensor.set(self.static_input_data, self.place) + + def fetch_value(self, var): + fetch_outs = self.exe.run(feed={ + 'x_tensor': self.x_tensor, + 'static_input_tensor': self.static_input_tensor + }, + fetch_list=[var], + return_numpy=False) + return self._lodtensor_to_ndarray(fetch_outs[0]) + + def _lodtensor_to_ndarray(self, lod_tensor): + dims = lod_tensor.get_dims() + ndarray = np.zeros(shape=dims).astype('float32') + for i in xrange(np.product(dims)): + ndarray.ravel()[i] = lod_tensor.get_float_element(i) + return ndarray, lod_tensor.lod() + + def build_graph(self, only_forward=False): + x_tensor = fluid.layers.data( + name='x_tensor', + shape=[self.x_tensor_dim], + dtype='float32', + lod_level=1) + x_tensor.stop_gradient = False + + static_input_tensor = fluid.layers.data( + name='static_input_tensor', + shape=[self.static_input_tensor_dim], + dtype='float32', + lod_level=1) + static_input_tensor.stop_gradient = False + + if only_forward: + static_input_out_array = self._program.global_block().create_var( + name='static_input_out_array', + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype='float32') + static_input_out_array.stop_gradient = True + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + step_x = rnn.step_input(x_tensor) + step_static_input = rnn.static_input(static_input_tensor) + if only_forward: + fluid.layers.array_write( + x=step_static_input, + i=rnn.step_idx, + array=static_input_out_array) + last = fluid.layers.sequence_pool( + input=step_static_input, pool_type='last') + projected = fluid.layers.fc(input=[step_x, last], + size=self.output_dim) + rnn.output(projected) + + if only_forward: + 
static_input_step_outs = [] + step_idx = fluid.layers.fill_constant( + shape=[1], dtype='int64', value=0) + step_idx.stop_gradient = True + + for i in xrange(self._max_sequence_len): + step_out = fluid.layers.array_read(static_input_out_array, + step_idx) + step_out.stop_gradient = True + static_input_step_outs.append(step_out) + fluid.layers.increment(x=step_idx, value=1.0, in_place=True) + + if only_forward: + return static_input_step_outs + + last = fluid.layers.sequence_pool(input=rnn(), pool_type='last') + loss = fluid.layers.mean(x=last) + append_backward(loss) + static_input_grad = self._program.global_block().var( + framework.grad_var_name('static_input_tensor')) + return static_input_grad, loss + + def get_seq_len_from_lod(self, lod): + return [lod[0][i + 1] - lod[0][i] for i in xrange(len(lod[0]) - 1)] + + def get_expected_static_step_outs(self): + x_lod = self.x_tensor.lod() + x_seq_len = self.get_seq_len_from_lod(x_lod) + x_seq_len_sorted = sorted(x_seq_len) + x_sorted_indices = np.argsort(x_seq_len)[::-1] + + static_lod = self.static_input_tensor.lod() + static_sliced = [ + self.static_input_data[static_lod[0][i]:static_lod[0][i + 1]] + for i in xrange(len(static_lod[0]) - 1) + ] + static_seq_len = self.get_seq_len_from_lod(static_lod) + static_reordered = [] + for i in xrange(len(x_sorted_indices)): + static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist()) + static_seq_len_reordered = [ + static_seq_len[x_sorted_indices[i]] + for i in xrange(len(x_sorted_indices)) + ] + + static_step_outs = [] + static_step_lods = [] + + for i in xrange(self._max_sequence_len): + end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1) + lod = [0] + for i in xrange(end): + lod.append(static_seq_len_reordered[i] + lod[-1]) + static_step_lods.append([lod]) + end = lod[-1] + static_step_outs.append( + np.array(static_reordered[:end]).astype('float32')) + + return static_step_outs, static_step_lods + + def test_step_out(self): + static_step_outs = self.build_graph(only_forward=True) + self.exe.run(framework.default_startup_program()) + expected_outs, expected_lods = self.get_expected_static_step_outs() + for i in xrange(self._max_sequence_len): + step_out, lod = self.fetch_value(static_step_outs[i]) + self.assertTrue(np.allclose(step_out, expected_outs[i])) + self.assertTrue(np.allclose(lod, expected_lods[i])) + + def test_network_gradient(self): + static_input_grad, loss = self.build_graph() + self.exe.run(framework.default_startup_program()) + + actual_gradients, actual_lod = self.fetch_value(static_input_grad) + + static_input_shape = self.static_input_tensor.get_dims() + numeric_gradients = np.zeros(shape=static_input_shape).astype('float32') + # calculate numeric gradients + tensor_size = np.product(static_input_shape) + for i in xrange(tensor_size): + origin = self.static_input_tensor.get_float_element(i) + x_pos = origin + self._delta + self.static_input_tensor.set_float_element(i, x_pos) + y_pos = self.fetch_value(loss)[0][0] + x_neg = origin - self._delta + self.static_input_tensor.set_float_element(i, x_neg) + y_neg = self.fetch_value(loss)[0][0] + self.static_input_tensor.set_float_element(i, origin) + numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2 + self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001)) + self.assertTrue(np.allclose(actual_lod, self.static_input_tensor.lod())) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_edit_distance_op.py 
b/python/paddle/v2/fluid/tests/test_edit_distance_op.py index 38e87728b387bb70a8921a2fe73a4e69701aabe9..cf118df634bb8288456009ebd4954f08d5eb4323 100644 --- a/python/paddle/v2/fluid/tests/test_edit_distance_op.py +++ b/python/paddle/v2/fluid/tests/test_edit_distance_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_elementwise_add_op.py b/python/paddle/v2/fluid/tests/test_elementwise_add_op.py index 57daddd5698f77527bc5b78c436065a851867ae0..3564772fb52882e9e58ea88caeb12c5e91137525 100644 --- a/python/paddle/v2/fluid/tests/test_elementwise_add_op.py +++ b/python/paddle/v2/fluid/tests/test_elementwise_add_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import numpy as np from op_test import OpTest @@ -27,6 +40,16 @@ class TestElementwiseOp(OpTest): ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) +class TestElementwiseAddOp_scalar(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_add" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(1).astype(np.float32) + } + self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} + + class TestElementwiseAddOp_Vector(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_add" diff --git a/python/paddle/v2/fluid/tests/test_elementwise_div_op.py b/python/paddle/v2/fluid/tests/test_elementwise_div_op.py index 41cb2b7767eb8e01e46e770a5da21b609f4eb911..77b113af7693c4a71a5a13c791cfb3e0420f4ff8 100644 --- a/python/paddle/v2/fluid/tests/test_elementwise_div_op.py +++ b/python/paddle/v2/fluid/tests/test_elementwise_div_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import unittest import numpy as np from op_test import OpTest @@ -32,6 +45,16 @@ class ElementwiseDivOp(OpTest): ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y')) +class TestElementwiseDivOp_scalar(ElementwiseDivOp): + def setUp(self): + self.op_type = "elementwise_div" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32), + 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32) + } + self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} + + class TestElementwiseDivOp_Vector(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" diff --git a/python/paddle/v2/fluid/tests/test_elementwise_max_op.py b/python/paddle/v2/fluid/tests/test_elementwise_max_op.py new file mode 100644 index 0000000000000000000000000000000000000000..9526f0199bf2f1a537a18010ef6b87105589ed07 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_elementwise_max_op.py @@ -0,0 +1,129 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +import unittest +import numpy as np +from op_test import OpTest + + +class TestElementwiseOp(OpTest): + def setUp(self): + self.op_type = "elementwise_max" + # If x and y have the same value, the max() is not differentiable. + # So we generate test data by the following method + # to avoid them being too close to each other. 
+ x = np.random.uniform(0.1, 1, [13, 17]).astype("float32") + sgn = np.random.choice([-1, 1], [13, 17]).astype("float32") + y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) + + +class TestElementwiseMaxOp_scalar(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.random_integers(-5, 5, [2, 3, 4]).astype("float32") + y = np.array([0.5]).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} + + +class TestElementwiseMaxOp_Vector(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.random((32, )).astype("float32") + sgn = np.random.choice([-1, 1], (32, )).astype("float32") + y = x + sgn * np.random.uniform(0.1, 1, (32, )).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} + + +class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (2, )).astype(np.float32) + y = x[:, 0, 0] + sgn * \ + np.random.uniform(1, 2, (2, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': + np.maximum(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1)) + } + + +class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (3, )).astype(np.float32) + y = x[0, :, 0] + sgn * \ + np.random.uniform(1, 2, (3, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1)) + } + + +class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (4, )).astype(np.float32) + y = x[0, 0, :] + sgn * \ + np.random.uniform(1, 2, (4, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.outputs = { + 'Out': + np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4)) + } + + +class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_max" + x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32) + sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float32) + y = x[0, :, :, 0] + sgn * \ + np.random.uniform(1, 2, (3, 4)).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1)) + } + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_elementwise_min_op.py b/python/paddle/v2/fluid/tests/test_elementwise_min_op.py new file mode 100644 index 
0000000000000000000000000000000000000000..b9007282335a32f85defcd1e2b190745abd38794 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_elementwise_min_op.py @@ -0,0 +1,129 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +import unittest +import numpy as np +from op_test import OpTest + + +class TestElementwiseOp(OpTest): + def setUp(self): + self.op_type = "elementwise_min" + # If x and y have the same value, the min() is not differentiable. + # So we generate test data by the following method + # to avoid them being too close to each other. + x = np.random.uniform(0.1, 1, [13, 17]).astype("float32") + sgn = np.random.choice([-1, 1], [13, 17]).astype("float32") + y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) + + +class TestElementwiseMinOp_scalar(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.random_integers(-5, 5, [2, 3, 4]).astype("float32") + y = np.array([0.5]).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} + + +class TestElementwiseMaxOp_Vector(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.random((32, )).astype("float32") + sgn = np.random.choice([-1, 1], (32, )).astype("float32") + y = x + sgn * np.random.uniform(0.1, 1, (32, )).astype("float32") + self.inputs = {'X': x, 'Y': y} + self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} + + +class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (2, )).astype(np.float32) + y = x[:, 0, 0] + sgn * \ + np.random.uniform(1, 2, (2, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': + np.minimum(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1)) + } + + +class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (3, )).astype(np.float32) + y = x[0, :, 0] + sgn * \ + np.random.uniform(1, 2, (3, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1)) + } + + +class 
TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) + sgn = np.random.choice([-1, 1], (4, )).astype(np.float32) + y = x[0, 0, :] + sgn * \ + np.random.uniform(1, 2, (4, )).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.outputs = { + 'Out': + np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4)) + } + + +class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_min" + x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32) + sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float32) + y = x[0, :, :, 0] + sgn * \ + np.random.uniform(1, 2, (3, 4)).astype(np.float32) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1)) + } + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py b/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py index 261ca9cb3da90dee91b016fee98f67b4c19356a1..12dfa6599cd634e1d806980f89e7c013b8eb8754 100644 --- a/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py +++ b/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import numpy as np from op_test import OpTest @@ -25,6 +38,16 @@ class ElementwiseMulOp(OpTest): self.check_grad(['X'], 'Out', no_grad_set=set('Y')) +class TestElementwiseMulOp_scalar(ElementwiseMulOp): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(1).astype(np.float32) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_Vector(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" diff --git a/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py b/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py index be982e8c57b30b91c2834bd5db38ea3c89f573ee..cf53d85bbad81f393a6263f8742fc942d357135f 100644 --- a/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py +++ b/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import unittest import numpy as np from op_test import OpTest @@ -27,6 +40,16 @@ class TestElementwiseOp(OpTest): ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) +class TestElementwiseSubOp_scalar(TestElementwiseOp): + def setUp(self): + self.op_type = "elementwise_sub" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(1).astype(np.float32) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_Vector(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" diff --git a/python/paddle/v2/fluid/tests/test_exception.py b/python/paddle/v2/fluid/tests/test_exception.py index b871f40c4a07ae2db7559e5a0f15664b21e94402..98c4cbe3f2f2ec3a2036a3fdb84aa5ee4600695c 100644 --- a/python/paddle/v2/fluid/tests/test_exception.py +++ b/python/paddle/v2/fluid/tests/test_exception.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.core as core import unittest diff --git a/python/paddle/v2/fluid/tests/test_executor_and_mul.py b/python/paddle/v2/fluid/tests/test_executor_and_mul.py index b1ef87c5cb1711c419b401c5950839816f7f4160..e8baf631e52e4acf645b90b0a9d319d296354944 100644 --- a/python/paddle/v2/fluid/tests/test_executor_and_mul.py +++ b/python/paddle/v2/fluid/tests/test_executor_and_mul.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy diff --git a/python/paddle/v2/fluid/tests/test_expand_op.py b/python/paddle/v2/fluid/tests/test_expand_op.py index 0440f7a2bb159bab4923683b5d0980e59e0a69c9..0524f2041fb1b40e5214039fe72d672c1a3691d6 100644 --- a/python/paddle/v2/fluid/tests/test_expand_op.py +++ b/python/paddle/v2/fluid/tests/test_expand_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_feed_fetch_method.py b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py index 178c85b0dd50df61b1fd35ef5d53ebbf39445cb4..718311517dfee51f4e3c724074cd0018a3fa757c 100644 --- a/python/paddle/v2/fluid/tests/test_feed_fetch_method.py +++ b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py index 99de6b5d052b41499800afb6181a235da340bc15..0adc487c04ae4af7ab052530b1d3ca75ec1eddbd 100644 --- a/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py +++ b/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_op.py index dff7b615aa378b0ef932df47241db07eace61a86..50d4ccb3bdd115f8fa5ea53d5751426cac44cedb 100644 --- a/python/paddle/v2/fluid/tests/test_fill_constant_op.py +++ b/python/paddle/v2/fluid/tests/test_fill_constant_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_fill_op.py b/python/paddle/v2/fluid/tests/test_fill_op.py index 88337598c895a5a663ef45fd0800fa950fee1253..42b06ec87c2b1811aa678f0563522f3d90405478 100644 --- a/python/paddle/v2/fluid/tests/test_fill_op.py +++ b/python/paddle/v2/fluid/tests/test_fill_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py index cd91769a22f8d6af193efabd8d997913676fbba6..a28bed9697309da29dce1220dc8080e0ad2083fd 100644 --- a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py +++ b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_framework_debug_str.py b/python/paddle/v2/fluid/tests/test_framework_debug_str.py index a4cbabdb36362c4ca14b76f366b648d6dbdbf7b3..6c82e67220f36217d88d2b7f73ede8f85e11d632 100644 --- a/python/paddle/v2/fluid/tests/test_framework_debug_str.py +++ b/python/paddle/v2/fluid/tests/test_framework_debug_str.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest from paddle.v2.fluid.framework import Program diff --git a/python/paddle/v2/fluid/tests/test_ftrl_op.py b/python/paddle/v2/fluid/tests/test_ftrl_op.py index f77ac4659a9b877829f7ae52dd005d9dd11dac07..599233efd93d91514171c46f70994bc45c9c6722 100644 --- a/python/paddle/v2/fluid/tests/test_ftrl_op.py +++ b/python/paddle/v2/fluid/tests/test_ftrl_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_gather_op.py b/python/paddle/v2/fluid/tests/test_gather_op.py index b0ab429ef1b53640dfb696f6ea2f7b745564b874..95093f9b846b7bfef4ad3b3dd777c5f7980fe83a 100644 --- a/python/paddle/v2/fluid/tests/test_gather_op.py +++ b/python/paddle/v2/fluid/tests/test_gather_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py index 6f6a60ccb3ff17f6a12eec6974b8b2d73885c29f..bf4785211e8e6d4af97f49d67847df7ac72dfa71 100644 --- a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy diff --git a/python/paddle/v2/fluid/tests/test_get_places_op.py b/python/paddle/v2/fluid/tests/test_get_places_op.py index c4346f6786c096026fa9cbd55fbd44c68f2f9981..b44011fb76be712d11bbd72ce95027a439a4d2c1 100644 --- a/python/paddle/v2/fluid/tests/test_get_places_op.py +++ b/python/paddle/v2/fluid/tests/test_get_places_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid as fluid import decorators import unittest diff --git a/python/paddle/v2/fluid/tests/test_gru_op.py b/python/paddle/v2/fluid/tests/test_gru_op.py index fa2c5a53ec4a01b6545e25f773c11277a4d24706..a6647d1bf28b8b4412d40f795d59eb526e0b7781 100644 --- a/python/paddle/v2/fluid/tests/test_gru_op.py +++ b/python/paddle/v2/fluid/tests/test_gru_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import math diff --git a/python/paddle/v2/fluid/tests/test_gru_unit_op.py b/python/paddle/v2/fluid/tests/test_gru_unit_op.py index 501d5aa5797d6def708338692f0861657f951ef7..53f10c32c7cd143b89da77e24555a7f59473f3a1 100644 --- a/python/paddle/v2/fluid/tests/test_gru_unit_op.py +++ b/python/paddle/v2/fluid/tests/test_gru_unit_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import math import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py index a8757a891faa01413dc6858451f1a988a3e030b5..dc7774d01c0a6d6a80d0e914cd327e1a4f9919e9 100644 --- a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_huber_loss_op.py index a24fcbec6cc4801118ce4ef97eb4692cd2351c28..18a48bb18ced83d79e602e0103a3e8d0a87c66ef 100644 --- a/python/paddle/v2/fluid/tests/test_huber_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_huber_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/test_image_classification_layer.py index b621d1525e33693869e24e2bb233bc8e257b077f..9d676e87594dfad7ede3ac237effb234f44e8369 100644 --- a/python/paddle/v2/fluid/tests/test_image_classification_layer.py +++ b/python/paddle/v2/fluid/tests/test_image_classification_layer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid as fluid diff --git a/python/paddle/v2/fluid/tests/test_infer_shape.py b/python/paddle/v2/fluid/tests/test_infer_shape.py index 9f6695ce02de749178046fbb613a58ba591b3dbc..0c2a6f1423c45b3a5c8cb9f0d9b3d7004f997b36 100644 --- a/python/paddle/v2/fluid/tests/test_infer_shape.py +++ b/python/paddle/v2/fluid/tests/test_infer_shape.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/test_inference_model_io.py index 71ca3e6c105c4437470f8e9f596e723d879b65e4..c5cad2166bd17241c6c86e5cd0614df36ad2961c 100644 --- a/python/paddle/v2/fluid/tests/test_inference_model_io.py +++ b/python/paddle/v2/fluid/tests/test_inference_model_io.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_initializer.py b/python/paddle/v2/fluid/tests/test_initializer.py index 3175010f48229d04421fc0068af0f0ed90e63af4..fa3c2afeedca4247379a34dea957bcceb9f134e1 100644 --- a/python/paddle/v2/fluid/tests/test_initializer.py +++ b/python/paddle/v2/fluid/tests/test_initializer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import unittest diff --git a/python/paddle/v2/fluid/tests/test_is_empty_op.py b/python/paddle/v2/fluid/tests/test_is_empty_op.py index 0a4dd0f4faf370161e5695d97f0ed4bf73b6ec26..d6876a885f53ae2dc574871013d570a801b73c7f 100644 --- a/python/paddle/v2/fluid/tests/test_is_empty_op.py +++ b/python/paddle/v2/fluid/tests/test_is_empty_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from paddle.v2.fluid.op import Operator diff --git a/python/paddle/v2/fluid/tests/test_l1_norm_op.py b/python/paddle/v2/fluid/tests/test_l1_norm_op.py index 3a1d1689fe6f941e95ca2df171a1e8e03278076d..92484c49f03ebad6f07ff2613c346545d15f1d72 100644 --- a/python/paddle/v2/fluid/tests/test_l1_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_l1_norm_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import unittest from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py index a56277d216c28ddeb752a0aad46daca305a685e4..a4e155b534a41e385167e6a6f01e32cfedf580e2 100644 --- a/python/paddle/v2/fluid/tests/test_layers.py +++ b/python/paddle/v2/fluid/tests/test_layers.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import unittest diff --git a/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py b/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py index c26634ff20c46e484d600c758be386ec8327d1c1..cd917dff7f26a04cd289470b3191ad08d7935169 100644 --- a/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py +++ b/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import random import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_lod_array_length_op.py b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py index 8a4be545eda841dbda33b7c8cae9f91a4199f2f8..f80136cb0d8bf5ae87cfa140d0b8aa895d38568d 100644 --- a/python/paddle/v2/fluid/tests/test_lod_array_length_op.py +++ b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.layers as layers from paddle.v2.fluid.executor import Executor diff --git a/python/paddle/v2/fluid/tests/test_lod_rank_table.py b/python/paddle/v2/fluid/tests/test_lod_rank_table.py index 30d619fe318517345195281b17f88e9916b6afb3..673605d79c72ef2d1251659afeee458ee0d0ac91 100644 --- a/python/paddle/v2/fluid/tests/test_lod_rank_table.py +++ b/python/paddle/v2/fluid/tests/test_lod_rank_table.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from paddle.v2.fluid.layers import lod_rank_table, data from paddle.v2.fluid.executor import Executor import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_lod_reset_op.py b/python/paddle/v2/fluid/tests/test_lod_reset_op.py index 652ccecfa443fc95f08f52df766709cb550f4049..d799dbfa217bc6012e1225216882a95fc0998544 100644 --- a/python/paddle/v2/fluid/tests/test_lod_reset_op.py +++ b/python/paddle/v2/fluid/tests/test_lod_reset_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py index d6d3e23fd8898a62528d63795d1bff1b72752477..c593b1e06132036ecdf89ef47b1074198c346cc6 100644 --- a/python/paddle/v2/fluid/tests/test_lod_tensor_array.py +++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.core as core import numpy diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py index c552cb033f1ec8f5843490083edee7b2762b5703..5887f9799a11106656306852a7ba4f7b2ef9ebd4 100644 --- a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py +++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.core as core import numpy diff --git a/python/paddle/v2/fluid/tests/test_log_loss_op.py b/python/paddle/v2/fluid/tests/test_log_loss_op.py index 2eeaa90758c57ef0d92a8ad7b0a4c1b1f2c38be3..fde99bfaa16e6d260a4eefc295dbc765c3816861 100644 --- a/python/paddle/v2/fluid/tests/test_log_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_log_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_logical_op.py b/python/paddle/v2/fluid/tests/test_logical_op.py index ac90bf839cb96053387bb82c112692136707744c..8c9e8de739f91a94352f1d9cfa5ce225be929b42 100644 --- a/python/paddle/v2/fluid/tests/test_logical_op.py +++ b/python/paddle/v2/fluid/tests/test_logical_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import op_test import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_lookup_table_op.py b/python/paddle/v2/fluid/tests/test_lookup_table_op.py index a56a549e69eaf950df39853a63947a8abac930d7..1ff6b305bc9269af6af5377801b90b2839a7c079 100644 --- a/python/paddle/v2/fluid/tests/test_lookup_table_op.py +++ b/python/paddle/v2/fluid/tests/test_lookup_table_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_lrn_op.py b/python/paddle/v2/fluid/tests/test_lrn_op.py index 9abb09e53a7af8eec69f9bd501c6883dd9df9930..051704617e733823e07db7a96d9dff5a8bb95afa 100644 --- a/python/paddle/v2/fluid/tests/test_lrn_op.py +++ b/python/paddle/v2/fluid/tests/test_lrn_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_lstm_op.py b/python/paddle/v2/fluid/tests/test_lstm_op.py index 77f062e8c8870ec9cc56c9566108abe74665ae30..76ea8def7cbe66f81b2c548fc2cc9fb9a46c77eb 100644 --- a/python/paddle/v2/fluid/tests/test_lstm_op.py +++ b/python/paddle/v2/fluid/tests/test_lstm_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py b/python/paddle/v2/fluid/tests/test_lstm_unit_op.py index 6bad2e1f7c34c51419424d88b41b809da997eb8f..c97c1e72aaa1e83f89e24ad5f72d022edef5f7b8 100644 --- a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py +++ b/python/paddle/v2/fluid/tests/test_lstm_unit_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py index 63378cbc4ec95d7d3c49a92f750b55a8dbc22414..3d8c1d19f90ed4e5945646b85cebb20bb5053f53 100644 --- a/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_matmul_op.py b/python/paddle/v2/fluid/tests/test_matmul_op.py
index d51572c8ab7c44fa0c6e83e50b56f05780530c61..f7dc4e053217dcceaf9c64e3605286fc0698593b 100644
--- a/python/paddle/v2/fluid/tests/test_matmul_op.py
+++ b/python/paddle/v2/fluid/tests/test_matmul_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
@@ -83,18 +96,18 @@ class Generator(object):
         self.outputs = {'Out': Out}
 
     def test_check_output(self):
-        self.check_output(atol=1e-2)
+        self.check_output(atol=1e-3)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3)
 
     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
+            ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X"))
 
     def test_check_grad_ignore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
+            ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y'))
 
 
 # Generate test cases for all possibilities
diff --git a/python/paddle/v2/fluid/tests/test_maxout_op.py b/python/paddle/v2/fluid/tests/test_maxout_op.py
index 5fbed43e254b811d38e441e946a73c24f87373de..ed8c0d2b67199fb880b4f314a2817a128bf1ad3a 100644
--- a/python/paddle/v2/fluid/tests/test_maxout_op.py
+++ b/python/paddle/v2/fluid/tests/test_maxout_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_mean_op.py b/python/paddle/v2/fluid/tests/test_mean_op.py
index 7823abd8f813aad6462c98a9ace9a13dc286a157..f9d7d6921e45d10ab2e22c0764c129a8a473726a 100644
--- a/python/paddle/v2/fluid/tests/test_mean_op.py
+++ b/python/paddle/v2/fluid/tests/test_mean_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py b/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py index 5cce75ddb8df50a35156fc2b8b411823711989c0..76f3c4eb644019b122a93fc12512c3bd39a71bbe 100644 --- a/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import unittest diff --git a/python/paddle/v2/fluid/tests/test_minus_op.py b/python/paddle/v2/fluid/tests/test_minus_op.py index c56d7cb548706880dd482bad750f2989c0e9a710..99c0d9056a7172ea40b7db943800a86d4cfbdb46 100644 --- a/python/paddle/v2/fluid/tests/test_minus_op.py +++ b/python/paddle/v2/fluid/tests/test_minus_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py index 33558c6105442b169b2e26abc7f39e15b7fe7322..18e3991b94843cda7316b8e23be02475bb669653 100644 --- a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import paddle.v2.fluid.layers as layers from paddle.v2.fluid.framework import Program, program_guard, default_main_program, default_startup_program from paddle.v2.fluid.executor import Executor diff --git a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py index 33de8ff7219fafa1ddeb9ebd78d77ae4fa240c98..40955283e6a9bfe07613f1a49e1adbc53275436e 100644 --- a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_momentum_op.py b/python/paddle/v2/fluid/tests/test_momentum_op.py index 638095f7564c8761151a7794f98f9ca797b0083b..8008a5586f13f97b5671c13bafe8bd8d98e5b3ea 100644 --- a/python/paddle/v2/fluid/tests/test_momentum_op.py +++ b/python/paddle/v2/fluid/tests/test_momentum_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_mul_op.py b/python/paddle/v2/fluid/tests/test_mul_op.py index 57d6d7e7e095cab2c3afb60d229fc09da98aed8b..3033b8ef70d2856805687fa7f09bf51fed01ff5a 100644 --- a/python/paddle/v2/fluid/tests/test_mul_op.py +++ b/python/paddle/v2/fluid/tests/test_mul_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_multiplex_op.py b/python/paddle/v2/fluid/tests/test_multiplex_op.py index 5937eb5aa4621556c9b8d59ea83a39d9738c7925..5746ab391e8b4201fc05b7556ad22bbb16fcd8ab 100644 --- a/python/paddle/v2/fluid/tests/test_multiplex_op.py +++ b/python/paddle/v2/fluid/tests/test_multiplex_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_nce.py b/python/paddle/v2/fluid/tests/test_nce.py index 8aeba69769525935c26576ec50035ed50d2ce44f..ce66a7c6b355d9b97b7bee853025e5e9cb6e533b 100644 --- a/python/paddle/v2/fluid/tests/test_nce.py +++ b/python/paddle/v2/fluid/tests/test_nce.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_net.py b/python/paddle/v2/fluid/tests/test_net.py index d9fe55a8af5c750c5c926e875ddbb645f8abb1a0..cc78cb4a56de8256f6c5cb41584d875f0946cd12 100644 --- a/python/paddle/v2/fluid/tests/test_net.py +++ b/python/paddle/v2/fluid/tests/test_net.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.core as core from paddle.v2.fluid.op import Operator import unittest diff --git a/python/paddle/v2/fluid/tests/test_norm_op.py b/python/paddle/v2/fluid/tests/test_norm_op.py index 7d56320489b24c5547e045cb51b778851ff94a32..b053522d72bfb95b1d3da960482857f0bf6f6f8d 100644 --- a/python/paddle/v2/fluid/tests/test_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_norm_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..caff63011d038d785472cb38a26a51f3f4cc9288
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+import unittest
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.core as core
+import numpy as np
+
+
+class TestNormalization(unittest.TestCase):
+    data_desc = {"name": "input", "shape": (2, 3, 7)}
+
+    def gen_random_input(self):
+        """Generate random input data.
+        """
+        self.data = np.random.random(
+            size=self.data_desc["shape"]).astype("float32")
+
+    def set_program(self, axis, epsilon):
+        """Build the test program.
+        """
+        data = fluid.layers.data(
+            name=self.data_desc["name"],
+            shape=self.data_desc["shape"],
+            dtype="float32",
+            append_batch_size=False)
+        data.stop_gradient = False
+        l2_norm = fluid.layers.l2_normalize(x=data, axis=axis, epsilon=epsilon)
+        out = fluid.layers.reduce_sum(l2_norm, dim=None)
+
+        fluid.backward.append_backward(loss=out)
+        self.fetch_list = [l2_norm]
+
+    def run_program(self):
+        """Run the test program.
+        """
+        places = [core.CPUPlace()]
+        if core.is_compile_gpu():
+            places.append(core.CUDAPlace(0))
+
+        for place in places:
+            self.set_inputs(place)
+            exe = fluid.Executor(place)
+
+            output = exe.run(fluid.default_main_program(),
+                             feed=self.inputs,
+                             fetch_list=self.fetch_list,
+                             return_numpy=True)
+            self.op_output = output
+
+    def set_inputs(self, place):
+        """Set the randomly generated data to the test program.
+        """
+        self.inputs = {}
+        tensor = fluid.Tensor()
+        tensor.set(self.data, place)
+        self.inputs[self.data_desc["name"]] = tensor
+
+    def l2_normalize(self, data, axis, epsilon):
+        """ Compute the groundtruth.
+        """
+        output = data * np.reciprocal(
+            np.sum(np.square(data), axis=axis, keepdims=True))
+        return output
+
+    def test_l2_normalize(self):
+        """ Test the python wrapper for l2_normalize.
+        """
+        axis = 1
+        #TODO(caoying) epsilon is not supported due to lack of a maximum_op.
+        epsilon = 1e-6
+
+        self.gen_random_input()
+
+        self.set_program(axis, epsilon)
+        self.run_program()
+
+        expect_output = self.l2_normalize(self.data, axis, epsilon)
+
+        # check output
+        self.assertTrue(np.allclose(self.op_output, expect_output, atol=0.001))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/fluid/tests/test_op_support_gpu.py b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
index a0eb4bd5fd2cc178ffe0763efdee61524ad6d4bd..741686a87465853b26311944e97cd88738d070fd 100644
--- a/python/paddle/v2/fluid/tests/test_op_support_gpu.py
+++ b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 
 import paddle.v2.fluid.core as core
diff --git a/python/paddle/v2/fluid/tests/test_operator.py b/python/paddle/v2/fluid/tests/test_operator.py
index c059a2b88b1324935f871b6e9c11efd5652ddd65..e75ee41149c6cf4479cd62620647198bc738406d 100644
--- a/python/paddle/v2/fluid/tests/test_operator.py
+++ b/python/paddle/v2/fluid/tests/test_operator.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 
 import paddle.v2.fluid.op as op
diff --git a/python/paddle/v2/fluid/tests/test_operator_desc.py b/python/paddle/v2/fluid/tests/test_operator_desc.py
index ce34d95ac8cb2644dee9c551cd8e85b33609919a..ed18fafe339271ce61b891966162bfc0d4a8c48d 100644
--- a/python/paddle/v2/fluid/tests/test_operator_desc.py
+++ b/python/paddle/v2/fluid/tests/test_operator_desc.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
import unittest import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py index 1eadb7d912629024ee21e30b0a5fa4910bb96e06..dbec3a59441dd1aa2e87296d9e4edd7e1a0f1306 100644 --- a/python/paddle/v2/fluid/tests/test_optimizer.py +++ b/python/paddle/v2/fluid/tests/test_optimizer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.framework as framework diff --git a/python/paddle/v2/fluid/tests/test_pad_op.py b/python/paddle/v2/fluid/tests/test_pad_op.py index 55f1774e5755c846f60a2f1df3e705444a81192b..1036b6bcad307a6ce6b4092c84db7f494e8f5811 100644 --- a/python/paddle/v2/fluid/tests/test_pad_op.py +++ b/python/paddle/v2/fluid/tests/test_pad_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 6c4c39ad59c0ec490c7c3b469e9fa219b28735ba..45196ef6fe5230a6b3ead0b64fee09492188da82 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
 import unittest
 
 import paddle.v2.fluid as fluid
@@ -138,24 +151,28 @@ class BaseParallelForTest(unittest.TestCase):
 
 
 class ParallelOpTest(BaseParallelForTest):
-    def test_simple_fc(self):
-        def __network__():
-            x = fluid.layers.data(shape=[784], dtype='float32', name='img')
-            # FIXME: This is a bug of parallel.do
-            x.stop_gradient = False
-            x = yield x
-            hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-            loss = fluid.layers.mean(x=hidden)
-            yield loss
+    @staticmethod
+    def __network__():
+        x = fluid.layers.data(shape=[784], dtype='float32', name='img')
+        x = yield x
+        hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
+        loss = fluid.layers.mean(x=hidden)
+        yield loss
 
+    def test_simple_fc(self):
         self.run_test(
-            callback=__network__,
+            callback=ParallelOpTest.__network__,
             feed={
-                'img':
-                numpy.random.random(size=(128 * 3, 784)).astype('float32')
+                'img': numpy.random.random(size=(51, 784)).astype('float32')
             },
             fetch='fc1.w@GRAD')
 
+    def test_fc_with_tiny_data(self):
+        self.run_test(
+            callback=ParallelOpTest.__network__,
+            feed={'img': numpy.random.random(size=(1, 784)).astype('float32')},
+            fetch='fc1.w@GRAD')
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/v2/fluid/tests/test_parameter.py b/python/paddle/v2/fluid/tests/test_parameter.py
index 694344acbbd3b7c80cb0ff48ada843f794061282..e0db318345061ad30178b69313b30f41c0c62164 100644
--- a/python/paddle/v2/fluid/tests/test_parameter.py
+++ b/python/paddle/v2/fluid/tests/test_parameter.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 from paddle.v2.fluid.framework import default_main_program
 import paddle.v2.fluid.core as core
diff --git a/python/paddle/v2/fluid/tests/test_pool2d_op.py b/python/paddle/v2/fluid/tests/test_pool2d_op.py
index 71accc3f65bb2d65ad7e7c83eb15242f0e1c8aa4..ac8b24e7ad57b536ba9db360a5499c36b783b133 100644
--- a/python/paddle/v2/fluid/tests/test_pool2d_op.py
+++ b/python/paddle/v2/fluid/tests/test_pool2d_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_pool3d_op.py b/python/paddle/v2/fluid/tests/test_pool3d_op.py index 8f410862aff5af633968d4c3c919563c874cc200..54b8df8465b44df953555bb1e4d299a48ffb5d66 100644 --- a/python/paddle/v2/fluid/tests/test_pool3d_op.py +++ b/python/paddle/v2/fluid/tests/test_pool3d_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_pool_max_op.py b/python/paddle/v2/fluid/tests/test_pool_max_op.py index 9d2d61c43868701392e90542f3b7fb2c4ea07548..c4ec0e50cc9d11d72faccd1a07b3f1843a9bcec3 100644 --- a/python/paddle/v2/fluid/tests/test_pool_max_op.py +++ b/python/paddle/v2/fluid/tests/test_pool_max_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py b/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py index f6a6c428a26dece01fe2958991edd3edf3a8266e..b75f7152efb0fbd56c87ff5b49d1a353aee5af25 100644 --- a/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py +++ b/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import itertools import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_precision_recall_op.py b/python/paddle/v2/fluid/tests/test_precision_recall_op.py index d3dbdb6e2aba6dfe98440ad07083cf1ffda5b668..87c7fcb4b5f06e530959f53b8d247cced0196350 100644 --- a/python/paddle/v2/fluid/tests/test_precision_recall_op.py +++ b/python/paddle/v2/fluid/tests/test_precision_recall_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_prelu_op.py b/python/paddle/v2/fluid/tests/test_prelu_op.py
index 7be932ac8f6b82283fecd32ac4b3b7bb9aff0338..38bd260bc92e2776b408a8c8b16ac905bb1de537 100644
--- a/python/paddle/v2/fluid/tests/test_prelu_op.py
+++ b/python/paddle/v2/fluid/tests/test_prelu_op.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 from op_test import OpTest
diff --git a/python/paddle/v2/fluid/tests/test_print_op.py b/python/paddle/v2/fluid/tests/test_print_op.py
index 86a701a020fc197d69d113f82a4e5ac58f377179..4e42863af45353b29d54daf76c9ab3608b217298 100644
--- a/python/paddle/v2/fluid/tests/test_print_op.py
+++ b/python/paddle/v2/fluid/tests/test_print_op.py
@@ -1,20 +1,67 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 
-import numpy as np
-from paddle.v2.fluid.executor import Executor
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.layers as pd
+from paddle.v2.fluid.executor import Executor
+import paddle.v2.fluid.layers as layers
+from paddle.v2.fluid.backward import append_backward
+from paddle.v2.fluid.framework import switch_main_program
+from paddle.v2.fluid.framework import Program
+import numpy as np
+
+
+class TestPrintOpCPU(unittest.TestCase):
+    def setUp(self):
+        self.place = core.CPUPlace()
+        self.x_tensor = core.LoDTensor()
+        tensor_np = np.random.random(size=(2, 3)).astype('float32')
+        self.x_tensor.set(tensor_np, self.place)
+        self.x_tensor.set_lod([[0, 1, 1]])
 
+    def build_network(self, only_forward, **kargs):
+        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
+        x.stop_gradient = False
+        printed = layers.Print(input=x, **kargs)
+        if only_forward: return printed
+        loss = layers.mean(x=printed)
+        append_backward(loss=loss)
+        return loss
 
-class TestSumOp(unittest.TestCase):
-    def test_tensor(self):
-        i = pd.zeros(shape=[2, 10], dtype='float32')
+    def test_forward(self):
+        switch_main_program(Program())
+        printed = self.build_network(True, print_phase='forward')
+        exe = Executor(self.place)
+        outs = exe.run(feed={'x': self.x_tensor},
+                       fetch_list=[printed],
+                       return_numpy=False)
 
-        pd.Print(i, message="I am a message", summarize=10)
+    def test_backward(self):
+        switch_main_program(Program())
+        loss = self.build_network(False, print_phase='backward')
+        exe = Executor(self.place)
+        outs = exe.run(feed={'x': self.x_tensor},
+                       fetch_list=[loss],
+                       return_numpy=False)
 
-        cpu = core.CPUPlace()
-        exe = Executor(cpu)
-        exe.run()
 
+class TestPrintOpGPU(TestPrintOpCPU):
+    def setUp(self):
+        self.place = core.CUDAPlace(0)
+        self.x_tensor = core.LoDTensor()
+        tensor_np = np.random.random(size=(2, 3)).astype('float32')
+        self.x_tensor.set(tensor_np, self.place)
+        self.x_tensor.set_lod([[0, 1, 1]])
 
 if __name__ == '__main__':
diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py
index e3f3ac58ef9b30864849770510f7339749dab84f..4b439a16aa24fa41870b3e156247df4dbf5b5098 100644
--- a/python/paddle/v2/fluid/tests/test_profiler.py
+++ b/python/paddle/v2/fluid/tests/test_profiler.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
 import unittest
 import numpy as np
 import paddle.v2.fluid as fluid
diff --git a/python/paddle/v2/fluid/tests/test_program.py b/python/paddle/v2/fluid/tests/test_program.py
index 447c746aacc1c9455d7a023bca625d548ab2638b..bcaeede93e43235d4c0288715dc19c7205e6a7c0 100644
--- a/python/paddle/v2/fluid/tests/test_program.py
+++ b/python/paddle/v2/fluid/tests/test_program.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from __future__ import print_function import unittest diff --git a/python/paddle/v2/fluid/tests/test_protobuf.py b/python/paddle/v2/fluid/tests/test_protobuf.py index e064374176fa221cfd042b7dbd2ddcb3b5ec41ec..5f0646d03603dfe3e37ac23e398812517db9ac17 100644 --- a/python/paddle/v2/fluid/tests/test_protobuf.py +++ b/python/paddle/v2/fluid/tests/test_protobuf.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 import unittest diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py index d8abe17606c4ddb2ff51d5f918b1e5d7e110f7fa..24638dc0e8a74b60fa0a3ed1852093728c0bc79c 100644 --- a/python/paddle/v2/fluid/tests/test_protobuf_descs.py +++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py b/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py index f89a493ab7a7a3d841088b7db37bff4dfbe63735..c197d850f978a2e0de67e0252d79c429c66c1a77 100644 --- a/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py +++ b/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_proximal_gd_op.py b/python/paddle/v2/fluid/tests/test_proximal_gd_op.py index 9ca79ce6b3b710244e4f65db70b305231a9f3fcf..15452558252d09e8a81ab647a64340c30029b505 100644 --- a/python/paddle/v2/fluid/tests/test_proximal_gd_op.py +++ b/python/paddle/v2/fluid/tests/test_proximal_gd_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_rank_loss_op.py index 0e41ab1b3fd8fa8b62c5f3b914b752918119a265..b4ba7920cd7aaf610075d3dee37f0b7825b387bd 100644 --- a/python/paddle/v2/fluid/tests/test_rank_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_rank_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/test_recurrent_op.py index 84f4e36fa7312fbcb96cc66ff26e234c3016df30..bcc3457aa3ae6dce161d0d58d6fd965b0cb87f11 100644 --- a/python/paddle/v2/fluid/tests/test_recurrent_op.py +++ b/python/paddle/v2/fluid/tests/test_recurrent_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.layers as layers diff --git a/python/paddle/v2/fluid/tests/test_reduce_op.py b/python/paddle/v2/fluid/tests/test_reduce_op.py index a021d4dd91bb9cc1e5d85411b3813b966ef5b296..57ee307ba66b47bd15864e7be3943b4f5237eb1e 100644 --- a/python/paddle/v2/fluid/tests/test_reduce_op.py +++ b/python/paddle/v2/fluid/tests/test_reduce_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/test_registry.py index f8328f31cf8203f5ea8af2c14417879616ccab71..dba11896307ae52877628ab7a51854b80c7a6fec 100644 --- a/python/paddle/v2/fluid/tests/test_registry.py +++ b/python/paddle/v2/fluid/tests/test_registry.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import warnings diff --git a/python/paddle/v2/fluid/tests/test_regularizer.py b/python/paddle/v2/fluid/tests/test_regularizer.py index 890c881a126a32344128652691c6cad45e02e82d..9eaae1904a01ca7994ad493c3b37352b3d50bab6 100644 --- a/python/paddle/v2/fluid/tests/test_regularizer.py +++ b/python/paddle/v2/fluid/tests/test_regularizer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.framework as framework diff --git a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py index 8b79d448e263d00849877c29158d7898bafe1937..0bcdfafcf4496a4f47c69afb7acd24427cbb634c 100644 --- a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py +++ b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid as fluid import paddle.v2.fluid.core as core @@ -6,8 +19,8 @@ import numpy class TestReorderLoDTensor(unittest.TestCase): num_seq = 5 - # [name, dim, lod_level] pair indicating data info of source and target - data_desc = (['input', 9, 0], ['ref', 5, 1]) + # [name, shape, lod_level] pair indicating data info of source and target + data_desc = (['input', [9], 0], ['ref', [5], 1]) @classmethod def setUpClass(cls): @@ -16,10 +29,10 @@ class TestReorderLoDTensor(unittest.TestCase): @classmethod def set_program(cls): dat = fluid.layers.data( - name=cls.data_desc[0][0], shape=[cls.data_desc[0][1]]) + name=cls.data_desc[0][0], shape=cls.data_desc[0][1]) dat.stop_gradient = False rank_dat = fluid.layers.data( - name=cls.data_desc[1][0], shape=[cls.data_desc[1][1]]) + name=cls.data_desc[1][0], shape=cls.data_desc[1][1]) table = fluid.layers.lod_rank_table(rank_dat) new_dat = fluid.layers.reorder_lod_tensor_by_rank( x=dat, rank_table=table) @@ -49,7 +62,7 @@ class TestReorderLoDTensor(unittest.TestCase): self.data = {} for desc in self.data_desc: data_name = desc[0] - data_dim = desc[1] + data_shape = desc[1] data_lod_level = desc[2] data_lod = [] for i in range(data_lod_level): @@ -59,9 +72,9 @@ class TestReorderLoDTensor(unittest.TestCase): size=self.num_seq if i == 0 else lod_level_i[-1]) lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist() data_lod.append(lod_level_i) - data_value = numpy.random.random(size=[ - data_lod[-1][-1] if data_lod else self.num_seq, data_dim - ]).astype('float32') + data_value = numpy.random.random( + size=[data_lod[-1][-1] if data_lod else self.num_seq + ] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) def set_inputs(self, place): @@ -163,8 +176,6 @@ class TestReorderLoDTensor(unittest.TestCase): numpy.allclose( numpy.array(actual_grad), expect_grad, atol=0.001)) self.assertEqual(expect_grad_lod, actual_grad.lod()) - global outputs_from_tensor_implicit_lod - outputs_from_tensor_implicit_lod = self.actual_outputs # compare outputs between LodTensors with explicit and implicit lod # use the same data but set the input lod explicitly diff --git a/python/paddle/v2/fluid/tests/test_reshape_op.py b/python/paddle/v2/fluid/tests/test_reshape_op.py index 18ee3aece656276fec9671df9baf298b7fd3c9b1..d6e6797043dc07cdc62f41a3d6dac43fc25934d0 100644 --- a/python/paddle/v2/fluid/tests/test_reshape_op.py +++ b/python/paddle/v2/fluid/tests/test_reshape_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_rmsprop_op.py b/python/paddle/v2/fluid/tests/test_rmsprop_op.py index 237bcfccceee89f62fc05e4c6c972a76d1875367..27a1ea213714271fb373e270add039bc4667e6fd 100644 --- a/python/paddle/v2/fluid/tests/test_rmsprop_op.py +++ b/python/paddle/v2/fluid/tests/test_rmsprop_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py index d1bb20f37a3785f70bee072b9df282bba4012c16..378d7f852304e4cc41ed760663457e470985afbc 100644 --- a/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py +++ b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest from paddle.v2.fluid.framework import Program diff --git a/python/paddle/v2/fluid/tests/test_roi_pool_op.py b/python/paddle/v2/fluid/tests/test_roi_pool_op.py index a28d9c7f82d3735c410369eb61e350168c267cea..6d7a698b09e288696caf9ad8460f8df32c11b009 100644 --- a/python/paddle/v2/fluid/tests/test_roi_pool_op.py +++ b/python/paddle/v2/fluid/tests/test_roi_pool_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import math diff --git a/python/paddle/v2/fluid/tests/test_row_conv_op.py b/python/paddle/v2/fluid/tests/test_row_conv_op.py index 1ed86e23ac28a575cdc3388e9da547918eb8a1be..1234d289cb2f021669b227237be9b0032f4ce935 100644 --- a/python/paddle/v2/fluid/tests/test_row_conv_op.py +++ b/python/paddle/v2/fluid/tests/test_row_conv_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_scale_op.py b/python/paddle/v2/fluid/tests/test_scale_op.py index 2ea1e185470280730ae8c8c0ea9568bbeb43eaf5..9847d3d36198c1078958e06ad87590f812e4eaa8 100644 --- a/python/paddle/v2/fluid/tests/test_scale_op.py +++ b/python/paddle/v2/fluid/tests/test_scale_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_scatter_op.py b/python/paddle/v2/fluid/tests/test_scatter_op.py index 1032269d5dfb02e3518b9ef2820d5d0dcc8a51a0..b6c4162f6f47c5eb5b8d6a0308fc80baeb37e14c 100644 --- a/python/paddle/v2/fluid/tests/test_scatter_op.py +++ b/python/paddle/v2/fluid/tests/test_scatter_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_scope.py b/python/paddle/v2/fluid/tests/test_scope.py index e4857b590aa6e09f1fa37c4a8a70a3ec9495b085..adaaf1690627e20a9e8d8b21796d061cb6fdc2dc 100644 --- a/python/paddle/v2/fluid/tests/test_scope.py +++ b/python/paddle/v2/fluid/tests/test_scope.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import paddle.v2.fluid.core import unittest diff --git a/python/paddle/v2/fluid/tests/test_selected_rows.py b/python/paddle/v2/fluid/tests/test_selected_rows.py index 93daf37aa2ceb8a599973f7b02874f23fe0763ff..3179a3caaecfc17a71f15f41d69cb82369ed5998 100644 --- a/python/paddle/v2/fluid/tests/test_selected_rows.py +++ b/python/paddle/v2/fluid/tests/test_selected_rows.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_seq_concat_op.py b/python/paddle/v2/fluid/tests/test_seq_concat_op.py index dccc6ed8afe2315da74f6886878b15d58b26b3c9..1f026fd76e83a270a7b20815b6ad1f397c062bd7 100644 --- a/python/paddle/v2/fluid/tests/test_seq_concat_op.py +++ b/python/paddle/v2/fluid/tests/test_seq_concat_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import sys diff --git a/python/paddle/v2/fluid/tests/test_seq_conv.py b/python/paddle/v2/fluid/tests/test_seq_conv.py index 14edc5f953022ca05f5620c28bd7276d961dd4d0..c7e508519446fc69758d0dbc91b1ea9bacb9f11b 100644 --- a/python/paddle/v2/fluid/tests/test_seq_conv.py +++ b/python/paddle/v2/fluid/tests/test_seq_conv.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import random diff --git a/python/paddle/v2/fluid/tests/test_seq_pool.py b/python/paddle/v2/fluid/tests/test_seq_pool.py index 512d8b315f29cecf79ae274dca491c240f3447a1..bb15495373fb083b1c7dc031c7286dceef7e4ecf 100644 --- a/python/paddle/v2/fluid/tests/test_seq_pool.py +++ b/python/paddle/v2/fluid/tests/test_seq_pool.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_sequence_erase_op.py b/python/paddle/v2/fluid/tests/test_sequence_erase_op.py index bf257fefea0d98c6f4d9860dbac4ccedf59bcdd9..650984009a76a56fb65811f7bb805ca656194a35 100644 --- a/python/paddle/v2/fluid/tests/test_sequence_erase_op.py +++ b/python/paddle/v2/fluid/tests/test_sequence_erase_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_sequence_expand.py b/python/paddle/v2/fluid/tests/test_sequence_expand.py index 0f22612d3dbe483e4d5a8638636e44e172160156..aacdabf295dc1c26c62db5ebfa6961df9fed5816 100644 --- a/python/paddle/v2/fluid/tests/test_sequence_expand.py +++ b/python/paddle/v2/fluid/tests/test_sequence_expand.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py index ccd9a05343b0c4aa05b258959665c0662f271512..94062431f0b4297535c20c4b86a11a8058c8876e 100644 --- a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py +++ b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import sys diff --git a/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py b/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py index 8bffdd585699bfae2262bcfcd0387d22fa1e62db..8170e4d7f18d0fe28d8f927f122139369ee672fe 100644 --- a/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py +++ b/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/test_sgd_op.py index 14d41e172a22c677235ab3fa997ef6f0b6e39778..4a71fb30a9c7a185be8150a476a6bb317d19e3eb 100644 --- a/python/paddle/v2/fluid/tests/test_sgd_op.py +++ b/python/paddle/v2/fluid/tests/test_sgd_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py index a14721b9aacfa7437623024af41555fd26990499..1825a5258fa1ab08c405d20e3e77b4d92a200e7f 100644 --- a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py +++ b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import paddle.v2.fluid.core as core from paddle.v2.fluid.executor import Executor diff --git a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py index c42f578f72cb121a24d6b852334cbd8a977f2730..132502c9cba5af71b2477cded1a9bc63ed842a56 100644 --- a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np from op_test import OpTest from scipy.special import logit diff --git a/python/paddle/v2/fluid/tests/test_sign_op.py b/python/paddle/v2/fluid/tests/test_sign_op.py index c6b59bcfd8ba71e54d4c3a2b7a3dac1f2a346265..f649cb9e7cd9938c697a8c36a88d5b39507e4269 100644 --- a/python/paddle/v2/fluid/tests/test_sign_op.py +++ b/python/paddle/v2/fluid/tests/test_sign_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py b/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py index b7f13c5699918d4969300499bd03e1668b2a4bca..1052eaa8b0ec281208344175a9978e25528f4e5d 100644 --- a/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_softmax_op.py b/python/paddle/v2/fluid/tests/test_softmax_op.py index 136fc0283afd6acf1de4baae5e681789662295ce..d03e50b2f1edb43ada22b0357fc136c5edccd6fe 100644 --- a/python/paddle/v2/fluid/tests/test_softmax_op.py +++ b/python/paddle/v2/fluid/tests/test_softmax_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py index c2f07f9096c69f3d4977f9444bdd5dcda8028973..330467081b41d980599dff7804e84a8bc000912a 100644 --- a/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py index 2e4defd55d75c2012f39bea30a6c4de12528e77c..4e90404eca49213c1fe2a5a35a912cd048ce7af8 100644 --- a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import paddle.v2.fluid.core as core import numpy as np diff --git a/python/paddle/v2/fluid/tests/test_split_op.py b/python/paddle/v2/fluid/tests/test_split_op.py index 37c6ebb89d1c3bcfc3c80a54a1e92c0326e046e3..000c300446f7a026477eba2e854b94d20969b5a3 100644 --- a/python/paddle/v2/fluid/tests/test_split_op.py +++ b/python/paddle/v2/fluid/tests/test_split_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_spp_op.py b/python/paddle/v2/fluid/tests/test_spp_op.py index 007723f0e35ad194c427401337bc9b13756576de..f09bb94b449f2868575d081490027efc472b5b95 100644 --- a/python/paddle/v2/fluid/tests/test_spp_op.py +++ b/python/paddle/v2/fluid/tests/test_spp_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py index dc6ebf5d30369231b4918a168bbdf25c7096c808..7b80d81d728197df7a089e35356e847234a8d3c9 100644 --- a/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py +++ b/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py index 5a52c6a66c781672a483324083b97a3c5894f508..80994f5937ec09d754f8782128bb36214321a3c7 100644 --- a/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy as np import unittest from numpy import linalg as LA diff --git a/python/paddle/v2/fluid/tests/test_sum_op.py b/python/paddle/v2/fluid/tests/test_sum_op.py index 60254291e2ab9215e2bc37c12d5e2e1ca6d33d5d..366708ac839643949e36cebe29392f6d4b8d5e6a 100644 --- a/python/paddle/v2/fluid/tests/test_sum_op.py +++ b/python/paddle/v2/fluid/tests/test_sum_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_tensor.py b/python/paddle/v2/fluid/tests/test_tensor.py index 9f870d9eb3485aa0b54eb781b906f4232d12c49e..62a48b206cd3fb1adefe8a015cf3e2df17a779aa 100644 --- a/python/paddle/v2/fluid/tests/test_tensor.py +++ b/python/paddle/v2/fluid/tests/test_tensor.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import paddle.v2.fluid.core as core import unittest import numpy diff --git a/python/paddle/v2/fluid/tests/test_top_k_op.py b/python/paddle/v2/fluid/tests/test_top_k_op.py index 6e8fbefa6eafa391cdb5e17c882ee74b5bdc6507..86968dba140c4e0b6e94b9bf78255bcbb753434d 100644 --- a/python/paddle/v2/fluid/tests/test_top_k_op.py +++ b/python/paddle/v2/fluid/tests/test_top_k_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_transpose_op.py b/python/paddle/v2/fluid/tests/test_transpose_op.py index 9409cbaa00f792b60d5950556b869108aa732478..ff2541f450ca8ce374404aef65f1caa2d5f4f00b 100644 --- a/python/paddle/v2/fluid/tests/test_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_transpose_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/test_uniform_random_op.py index dbe4d6bcd069d2088b3cc1b4efd575d14afd4198..332ac4f07ff936569c43aaab4a698f2b8e3fb985 100644 --- a/python/paddle/v2/fluid/tests/test_uniform_random_op.py +++ b/python/paddle/v2/fluid/tests/test_uniform_random_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy diff --git a/python/paddle/v2/fluid/tests/test_unpool_op.py b/python/paddle/v2/fluid/tests/test_unpool_op.py index e87f283042c081ed9f232d140ff8c303cd3d1858..988c0c75063f42f8865388979e14a00db3733400 100644 --- a/python/paddle/v2/fluid/tests/test_unpool_op.py +++ b/python/paddle/v2/fluid/tests/test_unpool_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import numpy as np from op_test import OpTest diff --git a/python/paddle/v2/fluid/tests/test_variable.py b/python/paddle/v2/fluid/tests/test_variable.py index f1e4c0ba21d5c4f10d2b5011bdb5abaebaec5431..199fd4a8c26fc2ebcd50655fd2ad131b08383071 100644 --- a/python/paddle/v2/fluid/tests/test_variable.py +++ b/python/paddle/v2/fluid/tests/test_variable.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest from paddle.v2.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ import paddle.v2.fluid.core as core diff --git a/python/paddle/v2/fluid/tests/test_warpctc_op.py b/python/paddle/v2/fluid/tests/test_warpctc_op.py index 59390d5303b9642ede0d421e908a1b129c68a072..9f565676c5af1685704681758a8590ecb6f59026 100644 --- a/python/paddle/v2/fluid/tests/test_warpctc_op.py +++ b/python/paddle/v2/fluid/tests/test_warpctc_op.py @@ -1,9 +1,24 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import sys import unittest import numpy as np from op_test import OpTest from test_softmax_op import stable_softmax +CUDA_BLOCK_SIZE = 512 + class CTCForward(object): def __init__(self, softmax, softmax_lod, labels, labels_lod, blank, @@ -154,47 +169,63 @@ class CTCForward(object): class TestWarpCTCOp(OpTest): + def config(self): + self.batch_size = 4 + self.num_classes = 8 + self.logits_lod = [[0, 4, 5, 8, 11]] + self.labels_lod = [[0, 3, 4, 8, 12]] + self.blank = self.num_classes - 1 + self.norm_by_times = False + def setUp(self): self.op_type = "warpctc" + self.config() - batch_size = 4 - num_classes = 8 - logits_lod = [[0, 4, 5, 8, 11]] - logits = np.random.uniform(0.1, 1.0, - [11, num_classes]).astype("float32") + logits = np.random.uniform( + 0.1, 1.0, + [self.logits_lod[0][-1], self.num_classes]).astype("float32") softmax = np.apply_along_axis(stable_softmax, 1, logits) - labels_lod = [[0, 3, 4, 8, 12]] # labels should not be blank - labels = np.random.randint(0, num_classes - 1, [12, 1], dtype="int32") - - blank = num_classes - 1 - norm_by_times = False + labels = np.random.randint( + 0, self.num_classes - 1, [self.labels_lod[0][-1], 1], dtype="int32") - ctc = CTCForward(softmax, logits_lod, labels, labels_lod, blank, - norm_by_times) + ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod, + self.blank, self.norm_by_times) loss = ctc.forward() max_sequence_length = 0 - for i in range(batch_size): - max_sequence_length = max(max_sequence_length, - logits_lod[0][i + 1] - logits_lod[0][i]) - gradient = np.zeros( - [max_sequence_length, batch_size, num_classes], dtype="float32") + for i in range(self.batch_size): + max_sequence_length = max( + max_sequence_length, + self.logits_lod[0][i + 1] - self.logits_lod[0][i]) + self.gradient = np.zeros( + [max_sequence_length, self.batch_size, self.num_classes], + dtype="float32") self.inputs = { - "Logits": (logits, 
logits_lod), - "Label": (labels, labels_lod) + "Logits": (logits, self.logits_lod), + "Label": (labels, self.labels_lod) } self.outputs = {"Loss": loss} - self.attrs = {"blank": blank, "norm_by_times": norm_by_times} + self.attrs = {"blank": self.blank, "norm_by_times": self.norm_by_times} def test_check_output(self): self.check_output() + def test_check_grad(self): + self.outputs['WarpCTCGrad'] = self.gradient + self.check_grad(["Logits"], "Loss", max_relative_error=0.007) + + +class TestWarpCTCOpCase1(TestWarpCTCOp): + def config(self): + self.batch_size = 4 + self.num_classes = CUDA_BLOCK_SIZE + 2 + self.logits_lod = [[0, 4, 5, 8, 11]] + self.labels_lod = [[0, 3, 4, 8, 12]] + self.blank = 0 + self.norm_by_times = False -# def test_check_grad(self): -# self.outputs["WarpCTCGrad"] = None -# self.check_grad(["Logits"], "Loss", max_relative_error=0.01) if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_while_op.py b/python/paddle/v2/fluid/tests/test_while_op.py index 7c5593cc5e5a66d4ccb237e3706ff3e544adf033..72de0a03612053ba3a07526726a65606275dc61c 100644 --- a/python/paddle/v2/fluid/tests/test_while_op.py +++ b/python/paddle/v2/fluid/tests/test_while_op.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import paddle.v2.fluid.layers as layers from paddle.v2.fluid.executor import Executor diff --git a/python/paddle/v2/image.py b/python/paddle/v2/image.py index 7408ea8ef611ddfa74dc5bb6ef45d4e0ccb9d141..a6fa0cecb87e86e804815012885678a9fc557d95 100644 --- a/python/paddle/v2/image.py +++ b/python/paddle/v2/image.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ This file contains some common interfaces for image preprocess. Many users are confused about the image layout. We introduce diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 9148cb56cf78e1ebb994f4a4a34d4a1b6e2e6ef4..39d1bfff0c8659bb87b9b97334f377639cea9c59 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import numpy import collections import topology diff --git a/python/paddle/v2/master/__init__.py b/python/paddle/v2/master/__init__.py index c8975b5d4a33cbecb4fa5a144bc610c36591d629..09daaaa75e01969ded25dcc848df3f7b9202124e 100644 --- a/python/paddle/v2/master/__init__.py +++ b/python/paddle/v2/master/__init__.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. from client import * __all__ = ['client'] diff --git a/python/paddle/v2/master/client.py b/python/paddle/v2/master/client.py index fc718f031e2267e737adbc340226e145bf614bf2..b874c2f349094c0c0ab9e3663fcc7491f1edd236 100644 --- a/python/paddle/v2/master/client.py +++ b/python/paddle/v2/master/client.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import ctypes import os diff --git a/python/paddle/v2/reader/tests/__init__.py b/python/paddle/v2/reader/tests/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2619c1c0e9db17c38ccc6e1dd010bd9c1c5966bd 100644 --- a/python/paddle/v2/reader/tests/__init__.py +++ b/python/paddle/v2/reader/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
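[Editor's note, not part of the patch] The test_warpctc_op.py hunk above replaces hard-coded sizes (batch size, class count, LoDs, blank index) with a config() method that setUp() calls, so TestWarpCTCOpCase1 can subclass the base test and override only those knobs. A minimal sketch of that pattern, using plain unittest with hypothetical names and values rather than the real OpTest machinery, might look like:

# Illustrative sketch only: the config()-override pattern for test variants.
import unittest


class SequenceSumBase(unittest.TestCase):
    def config(self):
        # Default case; subclasses override these attributes to add variants.
        self.values = [1, 2, 3, 4]
        self.expected = 10

    def setUp(self):
        # setUp stays generic: it only reads whatever config() provides.
        self.config()

    def test_sum(self):
        self.assertEqual(sum(self.values), self.expected)


class SequenceSumLargeCase(SequenceSumBase):
    def config(self):
        # Variant case: override only the knobs that differ.
        self.values = list(range(100))
        self.expected = 4950


if __name__ == "__main__":
    unittest.main()

Keeping setUp() free of constants is what lets the CUDA-sized variant (num_classes = CUDA_BLOCK_SIZE + 2) reuse the whole forward/gradient check unchanged.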
diff --git a/python/paddle/v2/tests/test_parameters.py b/python/paddle/v2/tests/test_parameters.py index 7ba8a939fbd1a949d61a007b40c054e7543c0cbc..ab6863620feacd4db7f83eec976811ab0097f5e7 100644 --- a/python/paddle/v2/tests/test_parameters.py +++ b/python/paddle/v2/tests/test_parameters.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. import unittest import sys diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index db01ab7374eca18b6063dc634da5ef83c4bc9adc..1a70a7203b7e358ce654621d686319179b511249 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. """ Module Trainer """ diff --git a/tools/manylinux1/build_scripts/manylinux1-check.py b/tools/manylinux1/build_scripts/manylinux1-check.py index 47fd3d673be662d2229480ee650dc3799301c31e..e4bde065a293c9d8ea8c5b150246766328138fd4 100644 --- a/tools/manylinux1/build_scripts/manylinux1-check.py +++ b/tools/manylinux1/build_scripts/manylinux1-check.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. # Logic copied from PEP 513 diff --git a/tools/manylinux1/build_scripts/ssl-check.py b/tools/manylinux1/build_scripts/ssl-check.py index a85d91978c510cccd366c174c317e6a3bdb589bd..900185cef14c51bca6a929801a86728b7ffc0b4a 100644 --- a/tools/manylinux1/build_scripts/ssl-check.py +++ b/tools/manylinux1/build_scripts/ssl-check.py @@ -1,3 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. # cf. https://github.com/pypa/manylinux/issues/53 GOOD_SSL = "https://google.com" diff --git a/v1_api_demo/README.md b/v1_api_demo/README.md deleted file mode 100644 index 0460a85fae078800332982751a5d4a9644c50bd6..0000000000000000000000000000000000000000 --- a/v1_api_demo/README.md +++ /dev/null @@ -1,5 +0,0 @@ -The examples in v1_api_demo are using v1_api currently, and will be upgraded to v2_api later. -Thus, v1_api_demo is a temporary directory. We decide not to maintain it and will delete it in future. - -Please go to [PaddlePaddle/book](https://github.com/PaddlePaddle/book) and -[PaddlePaddle/models](https://github.com/PaddlePaddle/models) to learn PaddlePaddle. diff --git a/v1_api_demo/gan/.gitignore b/v1_api_demo/gan/.gitignore deleted file mode 100644 index 93a6f5080a16a601cffb0bff51af9aef3ba3bae7..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -output/ -uniform_params/ -cifar_params/ -mnist_params/ -*.png -.pydevproject -.project -*.log -*.pyc -data/mnist_data/ -data/cifar-10-batches-py/ diff --git a/v1_api_demo/gan/README.md b/v1_api_demo/gan/README.md deleted file mode 100644 index 1908b534b0c1f63904d5503399b961d74ce0037c..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Generative Adversarial Networks (GAN) - -This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434). - -The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data). - -In order to run the model, first download the corresponding data by running the shell script in ./data. -Then you can run the command below. The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). - -$python gan_trainer.py -d cifar --use_gpu 1 - -The generated images will be stored in ./cifar_samples/ -The corresponding models will be stored in ./cifar_params/ diff --git a/v1_api_demo/gan/data/download_cifar.sh b/v1_api_demo/gan/data/download_cifar.sh deleted file mode 100755 index bbadc7c10c73e45a0948018b8812f79040d14bc4..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/data/download_cifar.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e -wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz -tar zxf cifar-10-python.tar.gz -rm cifar-10-python.tar.gz diff --git a/v1_api_demo/gan/data/get_mnist_data.sh b/v1_api_demo/gan/data/get_mnist_data.sh deleted file mode 100755 index a77c81bf5af9ddb6634ff89460797ca543c5e517..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/data/get_mnist_data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env sh -# This script downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/mnist_data" -mkdir "$DIR/mnist_data" -cd "$DIR/mnist_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done diff --git a/v1_api_demo/gan/gan_conf.py b/v1_api_demo/gan/gan_conf.py deleted file mode 100644 index 86ac2dffe5f4490a88e12d1fa5e8cd9fa61a69f4..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/gan_conf.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddle.trainer_config_helpers import * - -mode = get_config_arg("mode", str, "generator") -assert mode in set([ - "generator", "discriminator", "generator_training", "discriminator_training" -]) - -is_generator_training = mode == "generator_training" -is_discriminator_training = mode == "discriminator_training" -is_generator = mode == "generator" -is_discriminator = mode == "discriminator" - -# The network structure below follows the ref https://arxiv.org/abs/1406.2661 -# Here we used two hidden layers and batch_norm - -print('mode=%s' % mode) -# the dim of the noise (z) as the input of the generator network -noise_dim = 10 -# the dim of the hidden layer -hidden_dim = 10 -# the dim of the generated sample -sample_dim = 2 - -settings( - batch_size=128, - learning_rate=1e-4, - learning_method=AdamOptimizer(beta1=0.5)) - - -def discriminator(sample): - """ - discriminator ouputs the probablity of a sample is from generator - or real data. - The output has two dimenstional: dimension 0 is the probablity - of the sample is from generator and dimension 1 is the probabblity - of the sample is from real data. 
- """ - param_attr = ParamAttr(is_static=is_generator_training) - bias_attr = ParamAttr( - is_static=is_generator_training, initial_mean=1.0, initial_std=0) - - hidden = fc_layer( - input=sample, - name="dis_hidden", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer( - input=hidden, - name="dis_hidden2", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer( - hidden2, - act=ReluActivation(), - name="dis_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr( - is_static=is_generator_training, initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer( - input=hidden_bn, - name="dis_prob", - size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) - - -def generator(noise): - """ - generator generates a sample given noise - """ - param_attr = ParamAttr(is_static=is_discriminator_training) - bias_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=1.0, initial_std=0) - - hidden = fc_layer( - input=noise, - name="gen_layer_hidden", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer( - input=hidden, - name="gen_hidden2", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer( - hidden2, - act=ReluActivation(), - name="gen_layer_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr( - is_static=is_discriminator_training, - initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer( - input=hidden_bn, - name="gen_layer1", - size=sample_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - -if is_generator_training: - noise = data_layer(name="noise", size=noise_dim) - sample = generator(noise) - -if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim) - -if is_generator_training or is_discriminator_training: - label = data_layer(name="label", size=1) - prob = discriminator(sample) - cost = cross_entropy(input=prob, label=label) - classification_error_evaluator( - input=prob, label=label, name=mode + '_error') - outputs(cost) - -if is_generator: - noise = data_layer(name="noise", size=noise_dim) - outputs(generator(noise)) diff --git a/v1_api_demo/gan/gan_conf_image.py b/v1_api_demo/gan/gan_conf_image.py deleted file mode 100644 index c469227994c1a84d1aa73e03bbc74ebeac41d30e..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/gan_conf_image.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from paddle.trainer_config_helpers import * - -mode = get_config_arg("mode", str, "generator") -dataSource = get_config_arg("data", str, "mnist") -assert mode in set([ - "generator", "discriminator", "generator_training", "discriminator_training" -]) - -is_generator_training = mode == "generator_training" -is_discriminator_training = mode == "discriminator_training" -is_generator = mode == "generator" -is_discriminator = mode == "discriminator" - -# The network structure below follows the dcgan paper -# (https://arxiv.org/abs/1511.06434) - -print('mode=%s' % mode) -# the dim of the noise (z) as the input of the generator network -noise_dim = 100 -# the number of filters in the layer in generator/discriminator that is -# closet to the image -gf_dim = 64 -df_dim = 64 -if dataSource == "mnist": - sample_dim = 28 # image dim - c_dim = 1 # image color -else: - sample_dim = 32 - c_dim = 3 -s2, s4 = int(sample_dim / 2), int(sample_dim / 4), -s8, s16 = int(sample_dim / 8), int(sample_dim / 16) - -settings( - batch_size=128, - learning_rate=2e-4, - learning_method=AdamOptimizer(beta1=0.5)) - - -def conv_bn(input, - channels, - imgSize, - num_filters, - output_x, - stride, - name, - param_attr, - bias_attr, - param_attr_bn, - bn, - trans=False, - act=ReluActivation()): - """ - conv_bn is a utility function that constructs a convolution/deconv layer - with an optional batch_norm layer - - :param bn: whether to use batch_norm_layer - :type bn: bool - :param trans: whether to use conv (False) or deconv (True) - :type trans: bool - """ - - # calculate the filter_size and padding size based on the given - # imgSize and ouput size - tmp = imgSize - (output_x - 1) * stride - if tmp <= 1 or tmp > 5: - raise ValueError("conv input-output dimension does not fit") - elif tmp <= 3: - filter_size = tmp + 2 - padding = 1 - else: - filter_size = tmp - padding = 0 - - print(imgSize, output_x, stride, filter_size, padding) - - if trans: - nameApx = "_convt" - else: - nameApx = "_conv" - - if bn: - conv = img_conv_layer( - input, - filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, - num_channels=channels, - act=LinearActivation(), - groups=1, - stride=stride, - padding=padding, - bias_attr=bias_attr, - param_attr=param_attr, - shared_biases=True, - layer_attr=None, - filter_size_y=None, - stride_y=None, - padding_y=None, - trans=trans) - - conv_bn = batch_norm_layer( - conv, - act=act, - name=name + nameApx + "_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - - return conv_bn - else: - conv = img_conv_layer( - input, - filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, - num_channels=channels, - act=act, - groups=1, - stride=stride, - padding=padding, - bias_attr=bias_attr, - param_attr=param_attr, - shared_biases=True, - layer_attr=None, - filter_size_y=None, - stride_y=None, - padding_y=None, - trans=trans) - return conv - - -def generator(noise): - """ - generator generates a sample given noise - """ - param_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.02) - bias_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.0) - - param_attr_bn = ParamAttr( - is_static=is_discriminator_training, initial_mean=1.0, initial_std=0.02) - - h1 = fc_layer( - input=noise, - name="gen_layer_h1", - size=s8 * s8 * gf_dim * 4, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - h1_bn = batch_norm_layer( - h1, - act=ReluActivation(), - 
name="gen_layer_h1_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - - h2_bn = conv_bn( - h1_bn, - channels=gf_dim * 4, - output_x=s8, - num_filters=gf_dim * 2, - imgSize=s4, - stride=2, - name="gen_layer_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True, - trans=True) - - h3_bn = conv_bn( - h2_bn, - channels=gf_dim * 2, - output_x=s4, - num_filters=gf_dim, - imgSize=s2, - stride=2, - name="gen_layer_h3", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True, - trans=True) - - return conv_bn( - h3_bn, - channels=gf_dim, - output_x=s2, - num_filters=c_dim, - imgSize=sample_dim, - stride=2, - name="gen_layer_h4", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False, - trans=True, - act=TanhActivation()) - - -def discriminator(sample): - """ - discriminator ouputs the probablity of a sample is from generator - or real data. - The output has two dimenstional: dimension 0 is the probablity - of the sample is from generator and dimension 1 is the probabblity - of the sample is from real data. - """ - param_attr = ParamAttr( - is_static=is_generator_training, initial_mean=0.0, initial_std=0.02) - bias_attr = ParamAttr( - is_static=is_generator_training, initial_mean=0.0, initial_std=0.0) - - param_attr_bn = ParamAttr( - is_static=is_generator_training, initial_mean=1.0, initial_std=0.02) - - h0 = conv_bn( - sample, - channels=c_dim, - imgSize=sample_dim, - num_filters=df_dim, - output_x=s2, - stride=2, - name="dis_h0", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False) - - h1_bn = conv_bn( - h0, - channels=df_dim, - imgSize=s2, - num_filters=df_dim * 2, - output_x=s4, - stride=2, - name="dis_h1", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - h2_bn = conv_bn( - h1_bn, - channels=df_dim * 2, - imgSize=s4, - num_filters=df_dim * 4, - output_x=s8, - stride=2, - name="dis_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - return fc_layer( - input=h2_bn, - name="dis_prob", - size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) - - -if is_generator_training: - noise = data_layer(name="noise", size=noise_dim) - sample = generator(noise) - -if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim * sample_dim * c_dim) - -if is_generator_training or is_discriminator_training: - label = data_layer(name="label", size=1) - prob = discriminator(sample) - cost = cross_entropy(input=prob, label=label) - classification_error_evaluator( - input=prob, label=label, name=mode + '_error') - outputs(cost) - -if is_generator: - noise = data_layer(name="noise", size=noise_dim) - outputs(generator(noise)) diff --git a/v1_api_demo/gan/gan_trainer.py b/v1_api_demo/gan/gan_trainer.py deleted file mode 100644 index 4a26c230f7a21cc6dd4a3cdb52e32730b1ce73ca..0000000000000000000000000000000000000000 --- a/v1_api_demo/gan/gan_trainer.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import random -import numpy -import cPickle -import sys, os -from PIL import Image - -from paddle.trainer.config_parser import parse_config -from paddle.trainer.config_parser import logger -import py_paddle.swig_paddle as api -import matplotlib.pyplot as plt - - -def plot2DScatter(data, outputfile): - ''' - Plot the data as a 2D scatter plot and save to outputfile - data needs to be two dimensinoal - ''' - x = data[:, 0] - y = data[:, 1] - logger.info("The mean vector is %s" % numpy.mean(data, 0)) - logger.info("The std vector is %s" % numpy.std(data, 0)) - - heatmap, xedges, yedges = numpy.histogram2d(x, y, bins=50) - extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] - - plt.clf() - plt.scatter(x, y) - plt.savefig(outputfile, bbox_inches='tight') - - -def CHECK_EQ(a, b): - assert a == b, "a=%s, b=%s" % (a, b) - - -def copy_shared_parameters(src, dst): - ''' - copy the parameters from src to dst - :param src: the source of the parameters - :type src: GradientMachine - :param dst: the destination of the parameters - :type dst: GradientMachine - ''' - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - src_params = dict([(p.getName(), p) for p in src_params]) - - for i in xrange(dst.getParameterSize()): - dst_param = dst.getParameter(i) - src_param = src_params.get(dst_param.getName(), None) - if src_param is None: - continue - src_value = src_param.getBuf(api.PARAMETER_VALUE) - dst_value = dst_param.getBuf(api.PARAMETER_VALUE) - CHECK_EQ(len(src_value), len(dst_value)) - dst_value.copyFrom(src_value) - dst_param.setValueUpdated() - - -def print_parameters(src): - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - - print "***************" - for p in src_params: - print "Name is %s" % p.getName() - print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray( - ) - - -def load_mnist_data(imageFile): - f = open(imageFile, "rb") - f.read(16) - - # Define number of samples for train/test - if "train" in imageFile: - n = 60000 - else: - n = 10000 - - data = numpy.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)) - data = data / 255.0 * 2.0 - 1.0 - - f.close() - return data.astype('float32') - - -def load_cifar_data(cifar_path): - batch_size = 10000 - data = numpy.zeros((5 * batch_size, 32 * 32 * 3), dtype="float32") - for i in range(1, 6): - file = cifar_path + "/data_batch_" + str(i) - fo = open(file, 'rb') - dict = cPickle.load(fo) - fo.close() - data[(i - 1) * batch_size:(i * batch_size), :] = dict["data"] - - data = data / 255.0 * 2.0 - 1.0 - return data - - -# synthesize 2-D uniform data -def load_uniform_data(): - data = numpy.random.rand(1000000, 2).astype('float32') - return data - - -def merge(images, size): - if images.shape[1] == 28 * 28: - h, w, c = 28, 28, 1 - else: - h, w, c = 32, 32, 3 - img = numpy.zeros((h * size[0], w * size[1], c)) - for idx in xrange(size[0] * size[1]): - i = idx % size[1] - j = idx // size[1] - img[j*h:j*h+h, i*w:i*w+w, :] = \ - ((images[idx, :].reshape((h, w, c), order="F").transpose(1, 0, 2) + 1.0) / 2.0 * 255.0) - return img.astype('uint8') - - -def 
save_images(images, path): - merged_img = merge(images, [8, 8]) - if merged_img.shape[2] == 1: - im = Image.fromarray(numpy.squeeze(merged_img)).convert('RGB') - else: - im = Image.fromarray(merged_img, mode="RGB") - im.save(path) - - -def get_real_samples(batch_size, data_np): - return data_np[numpy.random.choice( - data_np.shape[0], batch_size, replace=False), :] - - -def get_noise(batch_size, noise_dim): - return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') - - -def get_fake_samples(generator_machine, batch_size, noise): - gen_inputs = api.Arguments.createArguments(1) - gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - gen_outputs = api.Arguments.createArguments(0) - generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST) - fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat() - return fake_samples - - -def get_training_loss(training_machine, inputs): - outputs = api.Arguments.createArguments(0) - training_machine.forward(inputs, outputs, api.PASS_TEST) - loss = outputs.getSlotValue(0).copyToNumpyMat() - return numpy.mean(loss) - - -def prepare_discriminator_data_batch_pos(batch_size, data_np): - real_samples = get_real_samples(batch_size, data_np) - labels = numpy.ones(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(real_samples)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) - return inputs - - -def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise): - fake_samples = get_fake_samples(generator_machine, batch_size, noise) - labels = numpy.zeros(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(fake_samples)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) - return inputs - - -def prepare_generator_data_batch(batch_size, noise): - label = numpy.ones(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(label)) - return inputs - - -def find(iterable, cond): - for item in iterable: - if cond(item): - return item - return None - - -def get_layer_size(model_conf, layer_name): - layer_conf = find(model_conf.layers, lambda x: x.name == layer_name) - assert layer_conf is not None, "Cannot find '%s' layer" % layer_name - return layer_conf.size - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-d", "--data_source", help="mnist or cifar or uniform") - parser.add_argument( - "--use_gpu", default="1", help="1 means use gpu for training") - parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter") - args = parser.parse_args() - data_source = args.data_source - use_gpu = args.use_gpu - assert data_source in ["mnist", "cifar", "uniform"] - assert use_gpu in ["0", "1"] - - if not os.path.exists("./%s_samples/" % data_source): - os.makedirs("./%s_samples/" % data_source) - - if not os.path.exists("./%s_params/" % data_source): - os.makedirs("./%s_params/" % data_source) - - api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', - '--log_period=100', '--gpu_id=' + args.gpu_id, - '--save_dir=' + "./%s_params/" % data_source) - - if data_source == "uniform": - conf = "gan_conf.py" - num_iter = 10000 - else: - conf = "gan_conf_image.py" - num_iter = 1000 - - gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source) - dis_conf = 
parse_config(conf, - "mode=discriminator_training,data=" + data_source) - generator_conf = parse_config(conf, "mode=generator,data=" + data_source) - batch_size = dis_conf.opt_config.batch_size - noise_dim = get_layer_size(gen_conf.model_config, "noise") - - if data_source == "mnist": - data_np = load_mnist_data("./data/mnist_data/train-images-idx3-ubyte") - elif data_source == "cifar": - data_np = load_cifar_data("./data/cifar-10-batches-py/") - else: - data_np = load_uniform_data() - - # this creates a gradient machine for discriminator - dis_training_machine = api.GradientMachine.createFromConfigProto( - dis_conf.model_config) - # this create a gradient machine for generator - gen_training_machine = api.GradientMachine.createFromConfigProto( - gen_conf.model_config) - - # generator_machine is used to generate data only, which is used for - # training discriminator - logger.info(str(generator_conf.model_config)) - generator_machine = api.GradientMachine.createFromConfigProto( - generator_conf.model_config) - - dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) - - gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) - - dis_trainer.startTrain() - gen_trainer.startTrain() - - # Sync parameters between networks (GradientMachine) at the beginning - copy_shared_parameters(gen_training_machine, dis_training_machine) - copy_shared_parameters(gen_training_machine, generator_machine) - - # constrain that either discriminator or generator can not be trained - # consecutively more than MAX_strike times - curr_train = "dis" - curr_strike = 0 - MAX_strike = 5 - - for train_pass in xrange(100): - dis_trainer.startTrainPass() - gen_trainer.startTrainPass() - for i in xrange(num_iter): - # Do forward pass in discriminator to get the dis_loss - noise = get_noise(batch_size, noise_dim) - data_batch_dis_pos = prepare_discriminator_data_batch_pos( - batch_size, data_np) - dis_loss_pos = get_training_loss(dis_training_machine, - data_batch_dis_pos) - - data_batch_dis_neg = prepare_discriminator_data_batch_neg( - generator_machine, batch_size, noise) - dis_loss_neg = get_training_loss(dis_training_machine, - data_batch_dis_neg) - - dis_loss = (dis_loss_pos + dis_loss_neg) / 2.0 - - # Do forward pass in generator to get the gen_loss - data_batch_gen = prepare_generator_data_batch(batch_size, noise) - gen_loss = get_training_loss(gen_training_machine, data_batch_gen) - - if i % 100 == 0: - print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos, - dis_loss_neg) - print "d_loss is %s g_loss is %s" % (dis_loss, gen_loss) - - # Decide which network to train based on the training history - # And the relative size of the loss - if (not (curr_train == "dis" and curr_strike == MAX_strike)) and \ - ((curr_train == "gen" and curr_strike == MAX_strike) or dis_loss > gen_loss): - if curr_train == "dis": - curr_strike += 1 - else: - curr_train = "dis" - curr_strike = 1 - dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_neg) - dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos) - copy_shared_parameters(dis_training_machine, - gen_training_machine) - - else: - if curr_train == "gen": - curr_strike += 1 - else: - curr_train = "gen" - curr_strike = 1 - gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) - # TODO: add API for paddle to allow true parameter sharing between different GradientMachines - # so that we do not need to copy shared parameters. 
- copy_shared_parameters(gen_training_machine, - dis_training_machine) - copy_shared_parameters(gen_training_machine, generator_machine) - - dis_trainer.finishTrainPass() - gen_trainer.finishTrainPass() - # At the end of each pass, save the generated samples/images - fake_samples = get_fake_samples(generator_machine, batch_size, noise) - if data_source == "uniform": - plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" % - (data_source, train_pass)) - else: - save_images(fake_samples, "./%s_samples/train_pass%s.png" % - (data_source, train_pass)) - dis_trainer.finishTrain() - gen_trainer.finishTrain() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/mnist/.gitignore b/v1_api_demo/mnist/.gitignore deleted file mode 100644 index 7e61d5e3a0cabd46d4185454d46610ac2ee2e63f..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -data/raw_data -data/*.list -mnist_vgg_model -plot.png -train.log -*pyc -.ipynb_checkpoints -params.pkl -params.tar -params.tar.gz diff --git a/v1_api_demo/mnist/api_train.py b/v1_api_demo/mnist/api_train.py deleted file mode 100644 index ea1caa7dd9653a2cc2860ace736fe3d25a3767e0..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/api_train.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -A very basic example for how to use current Raw SWIG API to train mnist network. - -Current implementation uses Raw SWIG, which means the API call is directly \ -passed to C++ side of Paddle. - -The user api could be simpler and carefully designed. -""" -import random - -import numpy as np -import paddle.v2 as paddle_v2 -import py_paddle.swig_paddle as api -from paddle.trainer_config_helpers import * -from py_paddle import DataProviderConverter - -from mnist_util import read_from_mnist - - -def init_parameter(network): - assert isinstance(network, api.GradientMachine) - for each_param in network.getParameters(): - assert isinstance(each_param, api.Parameter) - array_size = len(each_param) - array = np.random.uniform(-1.0, 1.0, array_size).astype('float32') - each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array) - - -def generator_to_batch(generator, batch_size): - ret_val = list() - for each_item in generator: - ret_val.append(each_item) - if len(ret_val) == batch_size: - yield ret_val - ret_val = list() - if len(ret_val) != 0: - yield ret_val - - -class BatchPool(object): - def __init__(self, generator, batch_size): - self.data = list(generator) - self.batch_size = batch_size - - def __call__(self): - random.shuffle(self.data) - for offset in xrange(0, len(self.data), self.batch_size): - limit = min(offset + self.batch_size, len(self.data)) - yield self.data[offset:limit] - - -def input_order_converter(generator): - for each_item in generator: - yield each_item['pixel'], each_item['label'] - - -def main(): - api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - - optimizer = paddle_v2.optimizer.Adam( - learning_rate=1e-4, - batch_size=1000, - model_average=ModelAverage(average_window=0.5), - regularization=L2Regularization(rate=0.5)) - - # Create Local Updater. Local means not run in cluster. - # For a cluster training, here we can change to createRemoteUpdater - # in future. 
- updater = optimizer.create_local_updater() - assert isinstance(updater, api.ParameterUpdater) - - # define network - images = paddle_v2.layer.data( - name='pixel', type=paddle_v2.data_type.dense_vector(784)) - label = paddle_v2.layer.data( - name='label', type=paddle_v2.data_type.integer_value(10)) - hidden1 = paddle_v2.layer.fc(input=images, size=200) - hidden2 = paddle_v2.layer.fc(input=hidden1, size=200) - inference = paddle_v2.layer.fc(input=hidden2, - size=10, - act=paddle_v2.activation.Softmax()) - cost = paddle_v2.layer.classification_cost(input=inference, label=label) - - # Create Simple Gradient Machine. - model_config = paddle_v2.layer.parse_network(cost) - m = api.GradientMachine.createFromConfigProto(model_config, - api.CREATE_MODE_NORMAL, - optimizer.enable_types()) - - # This type check is not useful. Only enable type hint in IDE. - # Such as PyCharm - assert isinstance(m, api.GradientMachine) - - # Initialize Parameter by numpy. - init_parameter(network=m) - - # Initialize ParameterUpdater. - updater.init(m) - - # DataProvider Converter is a utility convert Python Object to Paddle C++ - # Input. The input format is as same as Paddle's DataProvider. - converter = DataProviderConverter(input_types=[images.type, label.type]) - - train_file = './data/raw_data/train' - test_file = './data/raw_data/t10k' - - # start gradient machine. - # the gradient machine must be started before invoke forward/backward. - # not just for training, but also for inference. - m.start() - - # evaluator can print error rate, etc. It is a C++ class. - batch_evaluator = m.makeEvaluator() - test_evaluator = m.makeEvaluator() - - # Get Train Data. - # TrainData will stored in a data pool. Currently implementation is not care - # about memory, speed. Just a very naive implementation. - train_data_generator = input_order_converter(read_from_mnist(train_file)) - train_data = BatchPool(train_data_generator, 512) - - # outArgs is Neural Network forward result. Here is not useful, just passed - # to gradient_machine.forward - outArgs = api.Arguments.createArguments(0) - - for pass_id in xrange(2): # we train 2 passes. - updater.startPass() - - for batch_id, data_batch in enumerate(train_data()): - # data_batch is input images. - # here, for online learning, we could get data_batch from network. - - # Start update one batch. - pass_type = updater.startBatch(len(data_batch)) - - # Start BatchEvaluator. - # batch_evaluator can be used between start/finish. - batch_evaluator.start() - - # forwardBackward is a shortcut for forward and backward. - # It is sometimes faster than invoke forward/backward separately, - # because in GradientMachine, it may be async. - m.forwardBackward(converter(data_batch), outArgs, pass_type) - - for each_param in m.getParameters(): - updater.update(each_param) - - # Get cost. We use numpy to calculate total cost for this batch. - cost_vec = outArgs.getSlotValue(0) - cost_vec = cost_vec.copyToNumpyMat() - cost = cost_vec.sum() / len(data_batch) - - # Make evaluator works. - m.eval(batch_evaluator) - - # Print logs. - print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \ - cost, batch_evaluator - - batch_evaluator.finish() - # Finish batch. - # * will clear gradient. - # * ensure all values should be updated. - updater.finishBatch(cost) - - # testing stage. use test data set to test current network. 
- updater.apply() - test_evaluator.start() - test_data_generator = input_order_converter(read_from_mnist(test_file)) - for data_batch in generator_to_batch(test_data_generator, 512): - # in testing stage, only forward is needed. - m.forward(converter(data_batch), outArgs, api.PASS_TEST) - m.eval(test_evaluator) - - # print error rate for test data set - print 'Pass', pass_id, ' test evaluator: ', test_evaluator - test_evaluator.finish() - updater.restore() - - updater.catchUpWith() - params = m.getParameters() - for each_param in params: - assert isinstance(each_param, api.Parameter) - value = each_param.getBuf(api.PARAMETER_VALUE) - value = value.copyToNumpyArray() - - # Here, we could save parameter to every where you want - print each_param.getName(), value - - updater.finishPass() - - m.finish() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/mnist/data/generate_list.py b/v1_api_demo/mnist/data/generate_list.py deleted file mode 100644 index 49981cc7a93308bc96ad5097eba749440e958525..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/data/generate_list.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -o = open("./" + "train.list", "w") -o.write("./data/raw_data/train" + "\n") -o.close() - -o = open("./" + "test.list", "w") -o.write("./data/raw_data/t10k" + "\n") -o.close() diff --git a/v1_api_demo/mnist/data/get_mnist_data.sh b/v1_api_demo/mnist/data/get_mnist_data.sh deleted file mode 100755 index 5a2e34026d4fe7f8315d4f5453bec7c4ee4f6885..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/data/get_mnist_data.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This scripts downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/raw_data" -mkdir "$DIR/raw_data" -cd "$DIR/raw_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done - -cd $DIR -rm -f *.list -python generate_list.py diff --git a/v1_api_demo/mnist/light_mnist.py b/v1_api_demo/mnist/light_mnist.py deleted file mode 100644 index 33409054357d2f0c6a765b3ab3164eb2e584467e..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/light_mnist.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -is_predict = get_config_arg("is_predict", bool, False) - -####################Data Configuration ################## - -if not is_predict: - data_dir = './data/' - define_py_data_sources2( - train_list=data_dir + 'train.list', - test_list=data_dir + 'test.list', - module='mnist_provider', - obj='process') - -######################Algorithm Configuration ############# -settings(batch_size=50, learning_rate=0.001, learning_method=AdamOptimizer()) - -#######################Network Configuration ############# - -data_size = 1 * 28 * 28 -label_size = 10 -img = data_layer(name='pixel', size=data_size) - - -# light cnn -# A shallower cnn model: [CNN, BN, ReLU, Max-Pooling] x4 + FC x1 -# Easier to train for mnist dataset and quite efficient -# Final performance is close to deeper ones on tasks such as digital and character classification -def light_cnn(input_image, num_channels, num_classes): - def __light__(ipt, - num_filter=128, - times=1, - conv_filter_size=3, - dropouts=0, - num_channels_=None): - return img_conv_group( - input=ipt, - num_channels=num_channels_, - pool_size=2, - pool_stride=2, - conv_padding=0, - conv_num_filter=[num_filter] * times, - conv_filter_size=conv_filter_size, - conv_act=ReluActivation(), - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type=MaxPooling()) - - tmp = __light__(input_image, num_filter=128, num_channels_=num_channels) - tmp = __light__(tmp, num_filter=128) - tmp = __light__(tmp, num_filter=128) - tmp = __light__(tmp, num_filter=128, conv_filter_size=1) - - tmp = fc_layer(input=tmp, size=num_classes, act=SoftmaxActivation()) - return tmp - - -predict = light_cnn(input_image=img, num_channels=1, num_classes=label_size) - -if not is_predict: - lbl = data_layer(name="label", size=label_size) - inputs(img, lbl) - outputs(classification_cost(input=predict, label=lbl)) -else: - outputs(predict) diff --git a/v1_api_demo/mnist/mnist_provider.py b/v1_api_demo/mnist/mnist_provider.py deleted file mode 100644 index 888cfef1e7e3e1b4f556756c003eeb23e741cabe..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/mnist_provider.py +++ /dev/null @@ -1,12 +0,0 @@ -from paddle.trainer.PyDataProvider2 import * -from mnist_util import read_from_mnist - - -# Define a py data provider -@provider( - input_types={'pixel': dense_vector(28 * 28), - 'label': integer_value(10)}, - cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, filename): # settings is not used currently. 
- for each in read_from_mnist(filename): - yield each diff --git a/v1_api_demo/mnist/mnist_util.py b/v1_api_demo/mnist/mnist_util.py deleted file mode 100644 index 3fd88ae7edc821296ca0accbf6dedc083e411744..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/mnist_util.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy - -__all__ = ['read_from_mnist'] - - -def read_from_mnist(filename): - imgf = filename + "-images-idx3-ubyte" - labelf = filename + "-labels-idx1-ubyte" - f = open(imgf, "rb") - l = open(labelf, "rb") - - f.read(16) - l.read(8) - - # Define number of samples for train/test - if "train" in filename: - n = 60000 - else: - n = 10000 - - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - - for i in xrange(n): - yield {"pixel": images[i, :], 'label': labels[i]} - - f.close() - l.close() diff --git a/v1_api_demo/mnist/train.sh b/v1_api_demo/mnist/train.sh deleted file mode 100755 index ca2b1ad9eb960685b95b0f294a9b929e1a4acab1..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/train.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e -config=vgg_16_mnist.py -output=./mnist_vgg_model -log=train.log - -paddle train \ ---config=$config \ ---dot_period=10 \ ---log_period=100 \ ---test_all_data_in_one_period=1 \ ---use_gpu=0 \ ---trainer_count=1 \ ---num_passes=100 \ ---save_dir=$output \ -2>&1 | tee $log -paddle usage -l $log -e $? -n "mnist_train" >/dev/null 2>&1 - -python -m paddle.utils.plotcurve -i $log > plot.png diff --git a/v1_api_demo/mnist/vgg_16_mnist.py b/v1_api_demo/mnist/vgg_16_mnist.py deleted file mode 100644 index a819b391c690fb473801eb2e7ba3161cc31b5b4b..0000000000000000000000000000000000000000 --- a/v1_api_demo/mnist/vgg_16_mnist.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -is_predict = get_config_arg("is_predict", bool, False) - -####################Data Configuration ################## - -if not is_predict: - data_dir = './data/' - define_py_data_sources2( - train_list=data_dir + 'train.list', - test_list=data_dir + 'test.list', - module='mnist_provider', - obj='process') - -######################Algorithm Configuration ############# -settings( - batch_size=128, - learning_rate=0.1 / 128.0, - learning_method=MomentumOptimizer(0.9), - regularization=L2Regularization(0.0005 * 128)) - -#######################Network Configuration ############# - -data_size = 1 * 28 * 28 -label_size = 10 -img = data_layer(name='pixel', size=data_size) - -# small_vgg is predined in trainer_config_helpers.network -predict = small_vgg(input_image=img, num_channels=1, num_classes=label_size) - -if not is_predict: - lbl = data_layer(name="label", size=label_size) - inputs(img, lbl) - outputs(classification_cost(input=predict, label=lbl)) -else: - outputs(predict) diff --git a/v1_api_demo/model_zoo/embedding/.gitignore b/v1_api_demo/model_zoo/embedding/.gitignore deleted file mode 100644 index 908f5a3fb2f7c34368ea24d0fc3ac9cac29a4fdb..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/embedding/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -baidu.dict -model_*.emb diff --git a/v1_api_demo/model_zoo/embedding/extract_para.py b/v1_api_demo/model_zoo/embedding/extract_para.py deleted file mode 100755 index 570b90c1f772c8f6abfc6cda02560fd3471ef0b6..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/embedding/extract_para.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Example: - python extract_para.py --preModel PREMODEL --preDict PREDICT \ - --usrModel USRMODEL --usrDict USRDICT -d DIM - -Options: - -h, --help show this help message and exit - --preModel PREMODEL the name of pretrained embedding model - --preDict PREDICT the name of pretrained dictionary - --usrModel usrModel the name of output usr embedding model - --usrDict usrDict the name of user specified dictionary - -d DIM dimension of parameter -""" -from optparse import OptionParser -import struct - - -def get_row_index(preDict, usrDict): - """ - Get the row positions for all words in user dictionary from pre-trained dictionary. 
- return: a list of row positions - Example: preDict='a\nb\nc\n', usrDict='a\nc\n', then return [0,2] - """ - pos = [] - index = dict() - with open(preDict, "r") as f: - for line_index, line in enumerate(f): - word = line.strip().split()[0] - index[word] = line_index - with open(usrDict, "r") as f: - for line in f: - word = line.strip().split()[0] - pos.append(index[word]) - return pos - - -def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, - paraDim): - """ - Extract desired parameters from a pretrained embedding model based on user dictionary - """ - if paraDim not in [32, 64, 128, 256]: - raise RuntimeError("We only support 32, 64, 128, 256 dimensions now") - - fi = open(preModel, "rb") - fo = open(usrModel, "wb") - - # write filehead - rowIndex = get_row_index(preDict, usrDict) - newHead = struct.pack("iil", 0, 4, len(rowIndex) * paraDim) - fo.write(newHead) - bytes = 4 * paraDim - for i in range(0, len(rowIndex)): - # find the absolute position of input file - fi.seek(rowIndex[i] * bytes + 16, 0) - fo.write(fi.read(bytes)) - - print "extract parameters finish, total", len(rowIndex), "lines" - fi.close() - - -def main(): - """ - Main entry for running paraconvert.py - """ - usage = "usage: \n" \ - "python %prog --preModel PREMODEL --preDict PREDICT" \ - " --usrModel USRMODEL --usrDict USRDICT -d DIM" - parser = OptionParser(usage) - parser.add_option( - "--preModel", - action="store", - dest="preModel", - help="the name of pretrained embedding model") - parser.add_option( - "--preDict", - action="store", - dest="preDict", - help="the name of pretrained dictionary") - parser.add_option( - "--usrModel", - action="store", - dest="usrModel", - help="the name of output usr embedding model") - parser.add_option( - "--usrDict", - action="store", - dest="usrDict", - help="the name of user specified dictionary") - parser.add_option( - "-d", action="store", dest="dim", help="dimension of parameter") - (options, args) = parser.parse_args() - extract_parameters_by_usrDict(options.preModel, options.preDict, - options.usrModel, options.usrDict, - int(options.dim)) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/embedding/paraconvert.py b/v1_api_demo/model_zoo/embedding/paraconvert.py deleted file mode 100755 index ce7a70efc43d7f85708f1e12bb94739f3588370c..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/embedding/paraconvert.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Example: - python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM - python paraconvert.py --t2b -i INPUT -o OUTPUT - -Options: - -h, --help show this help message and exit - --b2t convert parameter file of embedding model from binary to text - --t2b convert parameter file of embedding model from text to binary - -i INPUT input parameter file name - -o OUTPUT output parameter file name - -d DIM dimension of parameter -""" -from optparse import OptionParser -import struct - - -def binary2text(input, output, paraDim): - """ - Convert a binary parameter file of embedding model to be a text file. - input: the name of input binary parameter file, the format is: - 1) the first 16 bytes is filehead: - version(4 bytes): version of paddle, default = 0 - floatSize(4 bytes): sizeof(float) = 4 - paraCount(8 bytes): total number of parameter - 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes - output: the name of output text parameter file, for example: - 0,4,32156096 - -0.7845433,1.1937413,-0.1704215,... - 0.0000909,0.0009465,-0.0008813,... - ... - the format is: - 1) the first line is filehead: - version=0, floatSize=4, paraCount=32156096 - 2) other lines print the paramters - a) each line prints paraDim paramters splitted by ',' - b) there is paraCount/paraDim lines (embedding words) - paraDim: dimension of parameters - """ - fi = open(input, "rb") - fo = open(output, "w") - """ - """ - version, floatSize, paraCount = struct.unpack("iil", fi.read(16)) - newHead = ','.join([str(version), str(floatSize), str(paraCount)]) - print >> fo, newHead - - bytes = 4 * int(paraDim) - format = "%df" % int(paraDim) - context = fi.read(bytes) - line = 0 - - while context: - numbers = struct.unpack(format, context) - lst = [] - for i in numbers: - lst.append('%8.7f' % i) - print >> fo, ','.join(lst) - context = fi.read(bytes) - line += 1 - fi.close() - fo.close() - print "binary2text finish, total", line, "lines" - - -def get_para_count(input): - """ - Compute the total number of embedding parameters in input text file. - input: the name of input text file - """ - numRows = 1 - paraDim = 0 - with open(input) as f: - line = f.readline() - paraDim = len(line.split(",")) - for line in f: - numRows += 1 - return numRows * paraDim - - -def text2binary(input, output, paddle_head=True): - """ - Convert a text parameter file of embedding model to be a binary file. - input: the name of input text parameter file, for example: - -0.7845433,1.1937413,-0.1704215,... - 0.0000909,0.0009465,-0.0008813,... - ... 
- the format is: - 1) it doesn't have filehead - 2) each line stores the same dimension of parameters, - the separator is commas ',' - output: the name of output binary parameter file, the format is: - 1) the first 16 bytes is filehead: - version(4 bytes), floatSize(4 bytes), paraCount(8 bytes) - 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes - """ - fi = open(input, "r") - fo = open(output, "wb") - - newHead = struct.pack("iil", 0, 4, get_para_count(input)) - fo.write(newHead) - - count = 0 - for line in fi: - line = line.strip().split(",") - for i in range(0, len(line)): - binary_data = struct.pack("f", float(line[i])) - fo.write(binary_data) - count += 1 - fi.close() - fo.close() - print "text2binary finish, total", count, "lines" - - -def main(): - """ - Main entry for running paraconvert.py - """ - usage = "usage: \n" \ - "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \ - "python %prog --t2b -i INPUT -o OUTPUT" - parser = OptionParser(usage) - parser.add_option( - "--b2t", - action="store_true", - help="convert parameter file of embedding model from binary to text") - parser.add_option( - "--t2b", - action="store_true", - help="convert parameter file of embedding model from text to binary") - parser.add_option( - "-i", action="store", dest="input", help="input parameter file name") - parser.add_option( - "-o", action="store", dest="output", help="output parameter file name") - parser.add_option( - "-d", action="store", dest="dim", help="dimension of parameter") - (options, args) = parser.parse_args() - if options.b2t: - binary2text(options.input, options.output, options.dim) - if options.t2b: - text2binary(options.input, options.output) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh b/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh deleted file mode 100755 index f61c65a935c76032a06613cfe0b50f1c90bc50d9..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e -set -x -BASE_URL='http://paddlepaddle.cdn.bcebos.com/model_zoo/embedding' - -DOWNLOAD_ITEMS=(baidu.dict model_32.emb model_64.emb model_128.emb model_256.emb) -ITEM_MD5=(fa03a12321eaab6c30a8fcc9442eaea3 - f88c8325ee6da6187f1080e8fe66c1cd - 927cf70f27f860aff1a5703ebf7f1584 - a52e43655cd25d279777ed509a1ae27b - b92c67fe9ff70fea53596080e351ac80) - -for ((i=0; i<${#ITEM_MD5[@]}; i++)) -do - FILENAME=${DOWNLOAD_ITEMS[${i}]} - REAL_MD5=`wget ${BASE_URL}/${FILENAME} -O - | tee ${FILENAME} | md5sum | cut -d ' ' -f 1` - EXPECTED_MD5=${ITEM_MD5[${i}]} - [ "${EXPECTED_MD5}" = "${REAL_MD5}" ] -done diff --git a/v1_api_demo/model_zoo/resnet/.gitignore b/v1_api_demo/model_zoo/resnet/.gitignore deleted file mode 100644 index 7a64209b62340a5c5a51626821028e63ed5e588e..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -fea_output/ -features/ -model.list -ResNet_50.dot -ResNet_50.png diff --git a/v1_api_demo/model_zoo/resnet/classify.py b/v1_api_demo/model_zoo/resnet/classify.py deleted file mode 100755 index 6074cc1d3a85e13e3e8d336d81e22104f9d8e7cf..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/classify.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import cPickle -import logging -from PIL import Image -import numpy as np -from optparse import OptionParser - -import paddle.utils.image_util as image_util - -from py_paddle import swig_paddle, DataProviderConverter -from paddle.trainer.PyDataProvider2 import dense_vector -from paddle.trainer.config_parser import parse_config - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') -logging.getLogger().setLevel(logging.INFO) - - -class ImageClassifier(): - def __init__(self, - train_conf, - model_dir=None, - resize_dim=256, - crop_dim=224, - use_gpu=True, - mean_file=None, - output_layer=None, - oversample=False, - is_color=True): - """ - train_conf: network configure. - model_dir: string, directory of model. - resize_dim: int, resized image size. - crop_dim: int, crop size. - mean_file: string, image mean file. - oversample: bool, oversample means multiple crops, namely five - patches (the four corner patches and the center - patch) as well as their horizontal reflections, - ten crops in all. 
- """ - self.train_conf = train_conf - self.model_dir = model_dir - if model_dir is None: - self.model_dir = os.path.dirname(train_conf) - - self.resize_dim = resize_dim - self.crop_dims = [crop_dim, crop_dim] - self.oversample = oversample - self.is_color = is_color - - self.output_layer = output_layer - if self.output_layer: - assert isinstance(self.output_layer, basestring) - self.output_layer = self.output_layer.split(",") - - self.transformer = image_util.ImageTransformer(is_color=is_color) - self.transformer.set_transpose((2, 0, 1)) - self.transformer.set_channel_swap((2, 1, 0)) - - self.mean_file = mean_file - if self.mean_file is not None: - mean = np.load(self.mean_file)['data_mean'] - mean = mean.reshape(3, self.crop_dims[0], self.crop_dims[1]) - self.transformer.set_mean(mean) # mean pixel - else: - # if you use three mean value, set like: - # this three mean value is calculated from ImageNet. - self.transformer.set_mean(np.array([103.939, 116.779, 123.68])) - - conf_args = "is_test=1,use_gpu=%d,is_predict=1" % (int(use_gpu)) - conf = parse_config(train_conf, conf_args) - swig_paddle.initPaddle("--use_gpu=%d" % (int(use_gpu))) - self.network = swig_paddle.GradientMachine.createFromConfigProto( - conf.model_config) - assert isinstance(self.network, swig_paddle.GradientMachine) - self.network.loadParameters(self.model_dir) - - data_size = 3 * self.crop_dims[0] * self.crop_dims[1] - slots = [dense_vector(data_size)] - self.converter = DataProviderConverter(slots) - - def get_data(self, img_path): - """ - 1. load image from img_path. - 2. resize or oversampling. - 3. transformer data: transpose, channel swap, sub mean. - return K x H x W ndarray. - - img_path: image path. - """ - image = image_util.load_image(img_path, self.is_color) - # Another way to extract oversampled features is that - # cropping and averaging from large feature map which is - # calculated by large size of image. - # This way reduces the computation. - if self.oversample: - # image_util.resize_image: short side is self.resize_dim - image = image_util.resize_image(image, self.resize_dim) - image = np.array(image) - input = np.zeros( - (1, image.shape[0], image.shape[1], 3), dtype=np.float32) - input[0] = image.astype(np.float32) - input = image_util.oversample(input, self.crop_dims) - else: - image = image.resize(self.crop_dims, Image.ANTIALIAS) - input = np.zeros( - (1, self.crop_dims[0], self.crop_dims[1], 3), dtype=np.float32) - input[0] = np.array(image).astype(np.float32) - - data_in = [] - for img in input: - img = self.transformer.transformer(img).flatten() - data_in.append([img.tolist()]) - # paddle input: [[[]],[[]],...], [[]] is one sample. - return data_in - - def forward(self, input_data): - """ - return output arguments which are the Outputs() in network configure. - - input_data: py_paddle input data. - call forward. - """ - in_arg = self.converter(input_data) - return self.network.forwardTest(in_arg) - - def forward(self, data, output_layer): - """ - return output arguments which are the Outputs() in network configure. - - input_data: py_paddle input data. - call forward. - """ - input = self.converter(data) - self.network.forwardTest(input) - output = self.network.getLayerOutputs(output_layer) - res = {} - if isinstance(output_layer, basestring): - output_layer = [output_layer] - for name in output_layer: - # For oversampling, average predictions across crops. - # If not, the shape of output[name]: (1, class_number), - # the mean is also applicable. 
- res[name] = output[name]['value'].mean(0) - - return res - - def predict(self, data_file): - """ - call forward and predicting. - - data_file: input image list. - """ - image_files = open(data_file, 'rb').readlines() - results = {} - if self.output_layer is None: - self.output_layer = ["output"] - for line in image_files: - image = line.split()[0] - data = self.get_data(image) - prob = self.forward(data, self.output_layer) - lab = np.argsort(-prob[self.output_layer[0]]) - results[image] = lab[0] - logging.info("Label of %s is: %d", image, lab[0]) - return results - - def extract(self, data_file, output_dir, batch_size=10000): - """ - extract and save features of output layers, which are - specify in Outputs() in network configure. - - data_file: file name of input data. - output_dir: saved directory of extracted features. - batch_size: sample number of one batch file. - """ - if not os.path.exists(output_dir): - os.mkdir(output_dir) - - sample_num = 0 - batch_num = 0 - image_feature = {} - image_files = open(data_file, 'rb').readlines() - for idx, line in enumerate(image_files): - image = line.split()[0] - data = self.get_data(image) - feature = self.forward(data, self.output_layer) - # save extracted features - file_name = image.split("/")[-1] - image_feature[file_name] = feature - sample_num += 1 - if sample_num == batch_size: - batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) - self.save_file(image_feature, batch_name) - logging.info('Finish batch %d', batch_num) - batch_num += 1 - sample_num = 0 - image_feature = {} - if idx % 1000 == 0: - logging.info('%d/%d, %s', idx, len(image_files), file_name) - if sample_num > 0: - batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) - self.save_file(image_feature, batch_name) - logging.info('Finish batch %d', batch_num) - logging.info('Done: make image feature batch') - - def save_file(self, data, file): - of = open(file, 'wb') - cPickle.dump(data, of, protocol=cPickle.HIGHEST_PROTOCOL) - - -def option_parser(): - """ - Main entry for predciting - """ - usage = "%prog -c config -i data_list -w model_dir [options]" - parser = OptionParser(usage="usage: %s" % usage) - parser.add_option( - "-j", - "--job", - action="store", - dest="job_type", - help="job type: predict, extract\ - predict: predicting,\ - extract: extract features") - parser.add_option( - "-c", - "--conf", - action="store", - dest="train_conf", - help="network config") - parser.add_option( - "-i", "--data", action="store", dest="data_file", help="image list") - parser.add_option( - "-w", - "--model", - action="store", - dest="model_path", - default=None, - help="model path") - parser.add_option( - "-g", - "--use_gpu", - action="store", - dest="use_gpu", - default=True, - help="Whether to use gpu mode.") - parser.add_option( - "-o", - "--output_dir", - action="store", - dest="output_dir", - default="output", - help="output path") - parser.add_option( - "-m", - "--mean", - action="store", - dest="mean", - default=None, - help="mean file.") - parser.add_option( - "-p", - "--multi_crop", - action="store_true", - dest="multi_crop", - default=False, - help="Wether to use multiple crops on image.") - parser.add_option("-l", "--output_layer", action="store", - dest="output_layer", default=None, - help="--job=extract, specify layers to extract "\ - "features, --job=predict, specify layer of " - "classification probability, output in resnet.py.") - return parser.parse_args() - - -def main(): - """ - 1. parse input arguments. - 2. 
predicting or extract features according job type. - """ - options, args = option_parser() - obj = ImageClassifier( - options.train_conf, - options.model_path, - use_gpu=options.use_gpu, - mean_file=options.mean, - output_layer=options.output_layer, - oversample=options.multi_crop) - if options.job_type == "predict": - obj.predict(options.data_file) - - elif options.job_type == "extract": - obj.extract(options.data_file, options.output_dir) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/resnet/example/.gitignore b/v1_api_demo/model_zoo/resnet/example/.gitignore deleted file mode 100644 index 4a2b5962a6800f251cba655c026331f14648c86e..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/example/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*image_list_provider_copy_1.py diff --git a/v1_api_demo/model_zoo/resnet/example/__init__.py b/v1_api_demo/model_zoo/resnet/example/__init__.py deleted file mode 100644 index f662d6826321eb840739382558f76327d27b5847..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/example/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/v1_api_demo/model_zoo/resnet/example/cat.jpg b/v1_api_demo/model_zoo/resnet/example/cat.jpg deleted file mode 100644 index 47b01db90eddc46ff845f10bc2accaf2364c272d..0000000000000000000000000000000000000000 Binary files a/v1_api_demo/model_zoo/resnet/example/cat.jpg and /dev/null differ diff --git a/v1_api_demo/model_zoo/resnet/example/dog.jpg b/v1_api_demo/model_zoo/resnet/example/dog.jpg deleted file mode 100644 index b9cc33cf069da5c453b97dbb7383838edd07c199..0000000000000000000000000000000000000000 Binary files a/v1_api_demo/model_zoo/resnet/example/dog.jpg and /dev/null differ diff --git a/v1_api_demo/model_zoo/resnet/example/image_list_provider.py b/v1_api_demo/model_zoo/resnet/example/image_list_provider.py deleted file mode 100644 index 2cd8eb8bf850f41282ed5db2885dc0b7218c79f7..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/example/image_list_provider.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
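For reference, the classify.py entry point above can also be driven programmatically instead of through option_parser(). The sketch below mirrors the defaults of the predict.sh and extract_fea_py.sh scripts further below; the paths and the CPU/GPU choice are plausible values for this demo, not something the code pins down:

    from classify import ImageClassifier  # assumes classify.py is on PYTHONPATH

    clf = ImageClassifier(
        train_conf="resnet.py",
        model_dir="model/resnet_50",
        use_gpu=False,
        mean_file="model/mean_meta_224/mean.meta",
        oversample=True)
    results = clf.predict("example/test.list")   # {image path: top-1 label id}
    # clf.extract("example/test.list", "features") would dump pickled feature batches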
- -from paddle.utils.image_util import * -from paddle.trainer.PyDataProvider2 import * - - -def hook(settings, image_size, crop_size, color, file_list, is_train, **kwargs): - """ - Description: Init with a list of data file - file_list is the name list of input files. - kwargs["load_data_args"] is the value of 'load_data_args' - which can be set in config. - Each args is separated by a column. - image_size: the crop image size. - mean_meta: the path of the meta file to store the mean image. - mean_value: can be mean value, not a file. - can not set mean_meta and mean_value at the same time. - color: 'color' means a color image. Otherwise, it means a gray image. - is_train: whether the data provider is used for training. - Data argumentation might be different for training and testing. - """ - settings.img_size = image_size - settings.crop_size = crop_size - settings.mean_img_size = settings.crop_size - settings.color = color # default is color - settings.is_train = is_train - - settings.is_swap_channel = kwargs.get('swap_channel', None) - if settings.is_swap_channel is not None: - settings.swap_channel = settings.is_swap_channel - settings.is_swap_channel = True - - if settings.color: - settings.img_input_size = settings.crop_size * settings.crop_size * 3 - else: - settings.img_input_size = settings.crop_size * settings.crop_size - - settings.file_list = file_list - settings.mean_meta = kwargs.get('mean_meta', None) - settings.mean_value = kwargs.get('mean_value', None) - # can not specify both mean_meta and mean_value. - assert not (settings.mean_meta and settings.mean_value) - if not settings.mean_meta: - settings.mean_value = kwargs.get('mean_value') - sz = settings.crop_size * settings.crop_size - settings.img_mean = np.zeros(sz * 3, dtype=np.single) - for idx, value in enumerate(settings.mean_value): - settings.img_mean[idx * sz:(idx + 1) * sz] = value - settings.img_mean = settings.img_mean.reshape(3, settings.crop_size, - settings.crop_size) - - else: - settings.img_mean = load_meta(settings.mean_meta, - settings.mean_img_size, - settings.crop_size, settings.color) - - settings.input_types = [ - dense_vector(settings.img_input_size), # image feature - integer_value(1) - ] # labels - - settings.logger.info('Image short side: %s', settings.img_size) - settings.logger.info('Crop size: %s', settings.crop_size) - settings.logger.info('Meta path: %s', settings.mean_meta) - if settings.is_swap_channel: - settings.logger.info('swap channel: %s', settings.swap_channel) - settings.logger.info('DataProvider Initialization finished') - - -@provider(init_hook=hook, should_shuffle=False) -def processData(settings, file_list): - """ - The main function for loading data. - Load the batch, iterate all the images and labels in this batch. - file_name: the batch file name. 
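The load_data_args consumed by this hook are easiest to see as the args dict handed to define_py_data_sources2() (resnet.py further below does exactly that with mean_meta). A hedged variant using mean_value instead, since the hook asserts that only one of the two is set, might look like the following; the channel means are the ImageNet values used in classify.py, and the hook looks the last key up as 'swap_channel':

    args = {
        'image_size': 224,
        'crop_size': 224,
        'color': True,
        'mean_value': [103.939, 116.779, 123.68],  # ImageNet channel means, as in classify.py
        'swap_channel': [2, 1, 0],
    }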
- """ - img_path, lab = file_list.strip().split(' ') - img = Image.open(img_path) - img.load() - img = img.resize((settings.img_size, settings.img_size), Image.ANTIALIAS) - img = np.array(img).astype(np.float32) - if len(img.shape) == 3: - img = np.swapaxes(img, 1, 2) - img = np.swapaxes(img, 1, 0) - # swap channel - if settings.is_swap_channel: - img = img[settings.swap_channel, :, :] - img_feat = preprocess_img(img, settings.img_mean, settings.crop_size, - settings.is_train, settings.color) - yield img_feat.tolist(), int(lab.strip()) diff --git a/v1_api_demo/model_zoo/resnet/example/test.list b/v1_api_demo/model_zoo/resnet/example/test.list deleted file mode 100644 index 30bbf630b640a26239fc104c9c08f6ebc9dfaa82..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/example/test.list +++ /dev/null @@ -1,2 +0,0 @@ -example/dog.jpg 0 -example/cat.jpg 0 diff --git a/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh b/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh deleted file mode 100755 index 5447aa92dfb5facd3433eb4a1893e96e3c786c73..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#set names of layer which you want to extract feature -#in Outputs() of resnet.py -#like: Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") -layer_num=50 -configure=./resnet.py -model_path=./model/resnet_$layer_num -fea_dir=fea_output -#Output is text file. -#Each line is one sample's features. -#If you set N layer names in Outputs() -#each line contains N features sperated by ";". - -# create model list file. -model_list=./model.list -touch $model_list | echo $model_path > $model_list - -paddle train \ - --local=true \ - --job=test \ - --config=$configure \ - --model_list=$model_list \ - --use_gpu=1 \ - --predict_output_dir=$fea_dir \ - --config_args=is_test=1,layer_num=$layer_num diff --git a/v1_api_demo/model_zoo/resnet/extract_fea_py.sh b/v1_api_demo/model_zoo/resnet/extract_fea_py.sh deleted file mode 100755 index 2e87152f7f8598f487870291271cdee646105044..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/extract_fea_py.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#Note if you use CPU mode, you need to set use_gpu=0 in classify.py. 
like this: -#conf_args = "is_test=0,use_gpu=1,is_predict=1" -#conf = parse_config(train_conf, conf_args) -#swig_paddle.initPaddle("--use_gpu=0") -python classify.py \ - --job=extract \ - --conf=resnet.py \ - --use_gpu=1 \ - --mean=model/mean_meta_224/mean.meta \ - --model=model/resnet_50 \ - --data=./example/test.list \ - --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ - --output_dir=features diff --git a/v1_api_demo/model_zoo/resnet/get_model.sh b/v1_api_demo/model_zoo/resnet/get_model.sh deleted file mode 100755 index b33d8178ab7859fc0b0d514fb19bec2c28a77c3d..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/get_model.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -mkdir model -cd model - -echo "Downloading ResNet models..." - -for file in resnet_50.tar.gz resnet_101.tar.gz resnet_152.tar.gz mean_meta_224.tar.gz -do - wget http://paddlepaddle.bj.bcebos.com/model_zoo/imagenet/$file - tar -xvf $file - rm $file -done - -echo "Done." diff --git a/v1_api_demo/model_zoo/resnet/load_feature.py b/v1_api_demo/model_zoo/resnet/load_feature.py deleted file mode 100644 index 5d3d0c0d30ef710c37c98e93a51b2f813d636b59..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/load_feature.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import cPickle -import logging - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') -logging.getLogger().setLevel(logging.INFO) - - -def load_feature_c(file): - """ - Load feature extracted by C++ interface. - Return a list. - file: feature file. - """ - features = [] - f = open(file, 'r') - for line in f: - sample = [] - for slot in line.strip().split(";"): - fea = [float(val) for val in slot.strip().split()] - if fea: - sample.append(fea) - features.append(sample) - f.close() - return features - - -def load_feature_py(feature_dir): - """ - Load feature extracted by python interface. - Return a dictionary. - feature_dir: directory of feature file. 
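To make the two on-disk layouts above concrete: the C++ extractor writes one text line per sample with one ';'-separated slot per layer listed in Outputs(), while the Python extractor writes cPickle'd {image name: feature} batches per directory. A small made-up round trip through load_feature_c() (the file name and numbers are invented for illustration):

    with open("features.txt", "w") as f:      # two output layers -> two slots per line
        f.write("0.1 0.2 0.3;1.5 2.5\n")

    feats = load_feature_c("features.txt")
    # feats == [[[0.1, 0.2, 0.3], [1.5, 2.5]]]
    # feats[0][1] is the second layer's feature vector for the first sample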
- """ - file_list = os.listdir(feature_dir) - file_list = [os.path.join(feature_dir, f) for f in file_list] - features = {} - for file_name in file_list: - with open(file_name, 'rb') as f: - feature = cPickle.load(f) - features.update(feature) - logging.info('Load feature file %s', file_name) - return features - - -if __name__ == '__main__': - print load_feature_py(sys.argv[1]) - #print load_feature_c(sys.argv[1]) diff --git a/v1_api_demo/model_zoo/resnet/net_diagram.sh b/v1_api_demo/model_zoo/resnet/net_diagram.sh deleted file mode 100755 index 1b06ffa44eec8a0f312420c35699d3902f9a6400..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/net_diagram.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -:' -Visual deep residual network -1. Using make_model_diagram.py to generate dot file. -2. Using graphviz to convert dot file. - -Usage: -./net_diagram.sh -' - -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -img_type=png -img_fileprefix=ResNet_50 -conf_filename=resnet.py -dot_filename=ResNet_50.dot -config_str="layer_num=50,data_provider=0" - -python -m paddle.utils.make_model_diagram $conf_filename $dot_filename $config_str - -# If you have installed graphviz, running like this: -# dot -Tpng -o ResNet.png ResNet.dot diff --git a/v1_api_demo/model_zoo/resnet/predict.sh b/v1_api_demo/model_zoo/resnet/predict.sh deleted file mode 100755 index 2b67b17c48c60cc8a7b7c46a1c80a3f2bf281870..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/predict.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -python classify.py \ - --job=predict \ - --conf=resnet.py\ - --model=model/resnet_50 \ - --multi_crop \ - --use_gpu=1 \ - --data=./example/test.list diff --git a/v1_api_demo/model_zoo/resnet/resnet.py b/v1_api_demo/model_zoo/resnet/resnet.py deleted file mode 100644 index 6fdd97fefc62392c93ecffae0fc918e8dc4b18c5..0000000000000000000000000000000000000000 --- a/v1_api_demo/model_zoo/resnet/resnet.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * -""" -paper: https://arxiv.org/abs/1512.03385 -""" -is_test = get_config_arg("is_test", bool, False) -is_predict = get_config_arg("is_predict", bool, False) -data_provider = get_config_arg("data_provider", bool, True) -layer_num = get_config_arg("layer_num", int, 50) - -if not is_predict and data_provider: - train_list = 'train.list' if not is_test else None - # mean.meta is mean file of ImageNet dataset. - # mean.meta size : 3 x 224 x 224. - # If you use three mean value, set like: - # "mean_value:103.939,116.779,123.68;" - args = { - 'mean_meta': "model/mean_meta_224/mean.meta", - 'image_size': 224, - 'crop_size': 224, - 'color': True, - 'swap_channel:': [2, 1, 0] - } - define_py_data_sources2( - train_list, - 'example/test.list', - module="example.image_list_provider", - obj="processData", - args=args) - -batch_size = 1 -learning_rate = 0.1 / batch_size -momentum = 0.9 -weight_decay = 0.0001 * batch_size -default_momentum(momentum) -default_decay_rate(weight_decay) - -Settings( - algorithm='sgd', - batch_size=batch_size, - learning_rate=learning_rate, - - # set the appropriate parameters according your schedule - learning_method='momentum', - learning_rate_decay_a=0.5, - learning_rate_decay_b=1200000 * 10, - learning_rate_schedule="discexp", ) - - -def conv_bn_layer(name, - input, - filter_size, - num_filters, - stride, - padding, - channels=None, - active_type=ReluActivation()): - """ - A wrapper for conv layer with batch normalization layers. - Note: - conv layer has no activation. - """ - - tmp = img_conv_layer( - name=name + "_conv", - input=input, - filter_size=filter_size, - num_channels=channels, - num_filters=num_filters, - stride=stride, - padding=padding, - act=LinearActivation(), - bias_attr=False) - return batch_norm_layer( - name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test) - - -def bottleneck_block(name, input, num_filters1, num_filters2): - """ - A wrapper for bottlenect building block in ResNet. - Last conv_bn_layer has no activation. - Addto layer has activation of relu. - """ - last_name = conv_bn_layer( - name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=1, - padding=0) - last_name = conv_bn_layer( - name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - last_name = conv_bn_layer( - name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer( - name=name + "_addto", input=[input, last_name], act=ReluActivation()) - - -def mid_projection(name, input, num_filters1, num_filters2, stride=2): - """ - A wrapper for middile projection in ResNet. - projection shortcuts are used for increasing dimensions, - and other shortcuts are identity - branch1: projection shortcuts are used for increasing - dimensions, has no activation. - branch2x: bottleneck building block, shortcuts are identity. 
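A quick sanity check on the layer_num naming used by the wrappers above and the stage counts below: every bottleneck block (and the main branch of mid_projection) stacks three conv_bn_layer calls, so the (3, 4, 6, 3) configuration used by res_net_50() works out to 50 layers:

    stages = (3, 4, 6, 3)
    depth = 1 + 3 * sum(stages) + 1   # conv1 + bottleneck convs + final fc
    assert depth == 50                # likewise (3, 4, 23, 3) -> 101 and (3, 8, 36, 3) -> 152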
- """ - # stride = 2 - branch1 = conv_bn_layer( - name=name + '_branch1', - input=input, - filter_size=1, - num_filters=num_filters2, - stride=stride, - padding=0, - active_type=LinearActivation()) - - last_name = conv_bn_layer( - name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=stride, - padding=0) - last_name = conv_bn_layer( - name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - - last_name = conv_bn_layer( - name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer( - name=name + "_addto", input=[branch1, last_name], act=ReluActivation()) - - -def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3): - """ - A wrapper for 50,101,152 layers of ResNet. - res2_num: number of blocks stacked in conv2_x - res3_num: number of blocks stacked in conv3_x - res4_num: number of blocks stacked in conv4_x - res5_num: number of blocks stacked in conv5_x - """ - # For ImageNet - # conv1: 112x112 - img = data_layer(name='input', size=224 * 224 * 3) - tmp = conv_bn_layer( - "conv1", - img, - filter_size=7, - channels=3, - num_filters=64, - stride=2, - padding=3) - tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2) - - # conv2_x: 56x56 - tmp = mid_projection( - name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1) - for i in xrange(2, res2_num + 1, 1): - tmp = bottleneck_block( - name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256) - - # conv3_x: 28x28 - tmp = mid_projection( - name="res3_1", input=tmp, num_filters1=128, num_filters2=512) - for i in xrange(2, res3_num + 1, 1): - tmp = bottleneck_block( - name="res3_" + str(i), - input=tmp, - num_filters1=128, - num_filters2=512) - - # conv4_x: 14x14 - tmp = mid_projection( - name="res4_1", input=tmp, num_filters1=256, num_filters2=1024) - for i in xrange(2, res4_num + 1, 1): - tmp = bottleneck_block( - name="res4_" + str(i), - input=tmp, - num_filters1=256, - num_filters2=1024) - - # conv5_x: 7x7 - tmp = mid_projection( - name="res5_1", input=tmp, num_filters1=512, num_filters2=2048) - for i in xrange(2, res5_num + 1, 1): - tmp = bottleneck_block( - name="res5_" + str(i), - input=tmp, - num_filters1=512, - num_filters2=2048) - - tmp = img_pool_layer( - name='avgpool', - input=tmp, - pool_size=7, - stride=1, - pool_type=AvgPooling()) - - output = fc_layer( - name='output', input=tmp, size=1000, act=SoftmaxActivation()) - - if not is_predict: - classification_cost( - input=output, label=data_layer( - name='label', size=1)) - - -def res_net_50(): - deep_res_net(3, 4, 6, 3) - - -def res_net_101(): - deep_res_net(3, 4, 23, 3) - - -def res_net_152(): - deep_res_net(3, 8, 36, 3) - - -if not is_predict: - Inputs("input", "label") -else: - Inputs("input") -# Outputs("cost-softmax" if not is_predict else "output") -Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") - -if layer_num == 50: - res_net_50() -elif layer_num == 101: - res_net_101() -elif layer_num == 152: - res_net_152() -else: - print("Wrong layer number.") diff --git a/v1_api_demo/quick_start/.gitignore b/v1_api_demo/quick_start/.gitignore deleted file mode 100644 index f71662563ff96d6227dd568d9951a90b0d09456e..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -*.pyc -data/dict.txt -data/dict_all.txt -data/labels.list -data/mosesdecoder-master/ 
-data/reviews_Electronics_5.json.gz -data/test.list -data/test.txt -data/train.list -data/train.txt -data/pred.list -data/pred.txt -dataprovider_copy_1.py -train.log -output diff --git a/v1_api_demo/quick_start/api_predict.py b/v1_api_demo/quick_start/api_predict.py deleted file mode 100755 index 9bdffe1006281c58a595e2771561ba62e4c2d6bd..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/api_predict.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os, sys -import numpy as np -from optparse import OptionParser -from py_paddle import swig_paddle, DataProviderConverter -from paddle.trainer.PyDataProvider2 import sparse_binary_vector -from paddle.trainer.config_parser import parse_config -""" -Usage: run following command to show help message. - python api_predict.py -h -""" - - -class QuickStartPrediction(): - def __init__(self, train_conf, dict_file, model_dir=None, label_file=None): - """ - train_conf: trainer configure. - dict_file: word dictionary file name. - model_dir: directory of model. - """ - self.train_conf = train_conf - self.dict_file = dict_file - self.word_dict = {} - self.dict_dim = self.load_dict() - self.model_dir = model_dir - if model_dir is None: - self.model_dir = os.path.dirname(train_conf) - - self.label = None - if label_file is not None: - self.load_label(label_file) - - conf = parse_config(train_conf, "is_predict=1") - self.network = swig_paddle.GradientMachine.createFromConfigProto( - conf.model_config) - self.network.loadParameters(self.model_dir) - input_types = [sparse_binary_vector(self.dict_dim)] - self.converter = DataProviderConverter(input_types) - - def load_dict(self): - """ - Load dictionary from self.dict_file. - """ - for line_count, line in enumerate(open(self.dict_file, 'r')): - self.word_dict[line.strip().split('\t')[0]] = line_count - return len(self.word_dict) - - def load_label(self, label_file): - """ - Load label. - """ - self.label = {} - for v in open(label_file, 'r'): - self.label[int(v.split('\t')[1])] = v.split('\t')[0] - - def get_index(self, data): - """ - transform word into integer index according to the dictionary. 
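A hedged end-to-end sketch of how the pieces above fit together, mirroring the defaults in api_predict.sh below; the review text is invented, and the paths are simply the ones that script uses:

    from py_paddle import swig_paddle

    swig_paddle.initPaddle("--use_gpu=0")    # must run before building the predictor
    predictor = QuickStartPrediction(
        train_conf="trainer_config.lr.py",
        dict_file="data/dict.txt",
        model_dir="output/model/pass-00001/",
        label_file="data/labels.list")
    # one-element batch: the list of word ids wrapped as a single sparse input slot
    predictor.batch_predict([[predictor.get_index("this product works great")]])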
- """ - words = data.strip().split() - word_slot = [self.word_dict[w] for w in words if w in self.word_dict] - return word_slot - - def batch_predict(self, data_batch): - input = self.converter(data_batch) - output = self.network.forwardTest(input) - prob = output[0]["id"].tolist() - print("predicting labels is:") - print prob - - -def option_parser(): - usage = "python predict.py -n config -w model_dir -d dictionary -i input_file " - parser = OptionParser(usage="usage: %s [options]" % usage) - parser.add_option( - "-n", - "--tconf", - action="store", - dest="train_conf", - help="network config") - parser.add_option( - "-d", - "--dict", - action="store", - dest="dict_file", - help="dictionary file") - parser.add_option( - "-b", - "--label", - action="store", - dest="label", - default=None, - help="dictionary file") - parser.add_option( - "-c", - "--batch_size", - type="int", - action="store", - dest="batch_size", - default=1, - help="the batch size for prediction") - parser.add_option( - "-w", - "--model", - action="store", - dest="model_path", - default=None, - help="model path") - return parser.parse_args() - - -def main(): - options, args = option_parser() - train_conf = options.train_conf - batch_size = options.batch_size - dict_file = options.dict_file - model_path = options.model_path - label = options.label - swig_paddle.initPaddle("--use_gpu=0") - predict = QuickStartPrediction(train_conf, dict_file, model_path, label) - - batch = [] - labels = [] - for line in sys.stdin: - [label, text] = line.split("\t") - labels.append(int(label)) - batch.append([predict.get_index(text)]) - print("labels is:") - print labels - predict.batch_predict(batch) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/api_predict.sh b/v1_api_demo/quick_start/api_predict.sh deleted file mode 100755 index 4d9aa9e8854ed79446a47dbc593f419cdda077b4..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/api_predict.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#Note the default model is pass-00002, you shold make sure the model path -#exists or change the mode path. -#only test on trainer_config.lr.py -model=output/model/pass-00001/ -config=trainer_config.lr.py -label=data/labels.list -dict=data/dict.txt -batch_size=20 -head -n$batch_size data/test.txt | python api_predict.py \ - --tconf=$config\ - --model=$model \ - --label=$label \ - --dict=$dict \ - --batch_size=$batch_size diff --git a/v1_api_demo/quick_start/api_train.py b/v1_api_demo/quick_start/api_train.py deleted file mode 100644 index 5699789daa4051661b0a72c69f4668f2d8bb9cb2..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/api_train.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import itertools -import random - -from paddle.trainer.config_parser import parse_config -from py_paddle import swig_paddle as api -from py_paddle import DataProviderConverter -from paddle.trainer.PyDataProvider2 \ - import integer_value, integer_value_sequence, sparse_binary_vector - - -def parse_arguments(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--train_data", type=str, required=False, help="train data file") - parser.add_argument("--test_data", type=str, help="test data file") - parser.add_argument( - "--config", type=str, required=True, help="config file name") - parser.add_argument("--dict_file", required=True, help="dictionary file") - parser.add_argument( - "--seq", default=1, type=int, help="whether use sequence training") - parser.add_argument( - "--use_gpu", default=0, type=int, help="whether use GPU for training") - parser.add_argument( - "--trainer_count", - default=1, - type=int, - help="Number of threads for training") - parser.add_argument( - "--num_passes", default=5, type=int, help="Number of training passes") - return parser.parse_args() - - -UNK_IDX = 0 - - -def load_data(file_name, word_dict): - with open(file_name, 'r') as f: - for line in f: - label, comment = line.strip().split('\t') - words = comment.split() - word_slot = [word_dict.get(w, UNK_IDX) for w in words] - yield word_slot, int(label) - - -def load_dict(dict_file): - word_dict = dict() - with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - return word_dict - - -def main(): - options = parse_arguments() - api.initPaddle("--use_gpu=%s" % options.use_gpu, - "--trainer_count=%s" % options.trainer_count) - - word_dict = load_dict(options.dict_file) - train_dataset = list(load_data(options.train_data, word_dict)) - if options.test_data: - test_dataset = list(load_data(options.test_data, word_dict)) - else: - test_dataset = None - - trainer_config = parse_config(options.config, - "dict_file=%s" % options.dict_file) - # No need to have data provider for trainer - trainer_config.ClearField('data_config') - trainer_config.ClearField('test_data_config') - - # create a GradientMachine from the model configuratin - model = api.GradientMachine.createFromConfigProto( - trainer_config.model_config) - # create a trainer for the gradient machine - trainer = api.Trainer.create(trainer_config, model) - - # create a data converter which converts data to PaddlePaddle - # internal format - input_types = [ - integer_value_sequence(len(word_dict)) if options.seq else - sparse_binary_vector(len(word_dict)), integer_value(2) - ] - converter = DataProviderConverter(input_types) - - batch_size = trainer_config.opt_config.batch_size - trainer.startTrain() - for train_pass in xrange(options.num_passes): - trainer.startTrainPass() - random.shuffle(train_dataset) - for pos in xrange(0, len(train_dataset), batch_size): - batch = itertools.islice(train_dataset, pos, pos + batch_size) - size = min(batch_size, len(train_dataset) - pos) - trainer.trainOneDataBatch(size, converter(batch)) - trainer.finishTrainPass() - if 
test_dataset: - trainer.startTestPeriod() - for pos in xrange(0, len(test_dataset), batch_size): - batch = itertools.islice(test_dataset, pos, pos + batch_size) - size = min(batch_size, len(test_dataset) - pos) - trainer.testOneDataBatch(size, converter(batch)) - trainer.finishTestPeriod() - trainer.finishTrain() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/api_train.sh b/v1_api_demo/quick_start/api_train.sh deleted file mode 100755 index 9b2a4e2f224b1677c458ede66a6a3bac09d8ad61..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/api_train.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -# Note: if using trainer_config.emb.py, trainer_config.cnn.py -# or trainer_config.lstm.py, you need to change --seq to --seq=1 -# because they are sequence models. -python api_train.py \ - --config=trainer_config.lr.py \ - --trainer_count=2 \ - --num_passes=15 \ - --use_gpu=0 \ - --seq=0 \ - --train_data=data/train.txt \ - --test_data=data/test.txt \ - --dict_file=data/dict.txt \ - 2>&1 | tee 'train.log' diff --git a/v1_api_demo/quick_start/cluster/cluster_train.sh b/v1_api_demo/quick_start/cluster/cluster_train.sh deleted file mode 100755 index a7b1f01064b29cf6abc4cd6b706ee466a6d6da36..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/cluster/cluster_train.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -# Should run pserver.sh before run this script. 
-bin_dir=$(cd `dirname $0`; pwd) -home_dir=$(cd "${bin_dir}/.."; pwd) -source "$bin_dir/env.sh" - -model_dir="$bin_dir/output" -log_file="$bin_dir/train.log" - -pushd "$home_dir" -cfg=trainer_config.lr.py -paddle train \ - --start_pserver=false \ - --config=$cfg \ - --save_dir=${model_dir} \ - --trainer_count=4 \ - --local=0 \ - --log_period=100 \ - --num_passes=15 \ - --use_gpu=false \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - --num_gradient_servers=1 \ - --nics=`get_nics` \ - --port=7164 \ - --ports_num=1 \ - --pservers="127.0.0.1" \ - --comment="paddle_trainer" \ - 2>&1 | tee "$log_file" -popd diff --git a/v1_api_demo/quick_start/cluster/env.sh b/v1_api_demo/quick_start/cluster/env.sh deleted file mode 100644 index a404993835d0e479f65c89c5561855293b7b66f0..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/cluster/env.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -function get_nics() { - machine=`uname -s` - local nics="" - if [ "$machine" == "Linux" ]; then - nics="lo" - elif [ "$machine" == "Darwin" ]; then - nics="lo0" - else - nics="unsupport" - fi - echo $nics -} diff --git a/v1_api_demo/quick_start/cluster/pserver.sh b/v1_api_demo/quick_start/cluster/pserver.sh deleted file mode 100755 index b187c1d9b9108a607ed310253d54ecc096f0e792..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/cluster/pserver.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e -bin_dir=$(cd `dirname $0`; pwd) -source "$bin_dir/env.sh" - -paddle pserver \ - --nics=`get_nics` \ - --port=7164 \ - --ports_num=1 \ - --ports_num_for_sparse=1 \ - --num_gradient_servers=1 \ - --comment="paddle_pserver" \ - 2>&1 | tee 'pserver.log' diff --git a/v1_api_demo/quick_start/data/README.md b/v1_api_demo/quick_start/data/README.md deleted file mode 100644 index 63abcf7ebf31903213e44cf492b93e09f61db14e..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/data/README.md +++ /dev/null @@ -1,9 +0,0 @@ -This dataset consists of electronics product reviews associated with -binary labels (positive/negative) for sentiment classification. - -The preprocessed data can be downloaded by script `get_data.sh`. 
-The data was derived from reviews_Electronics_5.json.gz at - -http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz - -If you want to process the raw data, you can use the script `proc_from_raw_data/get_data.sh`. diff --git a/v1_api_demo/quick_start/data/get_data.sh b/v1_api_demo/quick_start/data/get_data.sh deleted file mode 100755 index a09a18f919e5a84f1f7c889a43f0a5fbf4a60a77..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/data/get_data.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -# Download the preprocessed data -wget http://paddlepaddle.bj.bcebos.com/demo/quick_start_preprocessed_data/preprocessed_data.tar.gz - -# Extract package -tar zxvf preprocessed_data.tar.gz - -# Remove compressed package -rm preprocessed_data.tar.gz diff --git a/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh b/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh deleted file mode 100755 index d976eaebfaa600778e0ab6bb0adbd7159f1cce2f..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# 1. size of pos : neg = 1:1. -# 2. size of testing set = min(25k, len(all_data) * 0.1), others is traning set. -# 3. distinct train set and test set. - -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -# Download data -echo "Downloading Amazon Electronics reviews data..." -# http://jmcauley.ucsd.edu/data/amazon/ -wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz -echo "Downloading mosesdecoder..." -# https://github.com/moses-smt/mosesdecoder -wget https://github.com/moses-smt/mosesdecoder/archive/master.zip - -unzip master.zip -rm master.zip - -################## -# Preprocess data -echo "Preprocess data..." -export LC_ALL=C -UNAME_STR=`uname` - -if [ ${UNAME_STR} == 'Linux' ]; then - SHUF_PROG='shuf' -else - SHUF_PROG='gshuf' -fi - -mkdir -p tmp -python preprocess.py -i reviews_Electronics_5.json.gz -# uniq and shuffle -cd tmp -echo 'Uniq and shuffle...' 
-cat pos_*|sort|uniq|${SHUF_PROG}> pos.shuffed -cat neg_*|sort|uniq|${SHUF_PROG}> neg.shuffed - -min_len=`sed -n '$=' neg.shuffed` -test_num=$((min_len/10)) -if [ $test_num -gt 12500 ];then - test_num=12500 -fi -train_num=$((min_len-test_num)) - -head -n$train_num pos.shuffed >train.pos -head -n$train_num neg.shuffed >train.neg -tail -n$test_num pos.shuffed >test.pos -tail -n$test_num neg.shuffed >test.neg - -cat train.pos train.neg | ${SHUF_PROG} >../train.txt -cat test.pos test.neg | ${SHUF_PROG} >../test.txt - -cd - -echo 'train.txt' > train.list -echo 'test.txt' > test.list - -# use 30k dict -rm -rf tmp -mv dict.txt dict_all.txt -cat dict_all.txt | head -n 30001 > dict.txt -echo 'Done.' diff --git a/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py b/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py deleted file mode 100755 index 72bd95f21d8bde8b3d1962ea10ecf6fc7d0ea478..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: UTF-8 -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -1. Tokenize the words and punctuation -2. pos sample : rating score 5; neg sample: rating score 1-2. - -Usage: - python preprocess.py -i data_file [random seed] -""" - -import sys -import os -import operator -import gzip -from subprocess import Popen, PIPE -from optparse import OptionParser -import json -from multiprocessing import Queue -from multiprocessing import Pool -import multiprocessing - -batch_size = 5000 -word_count = {} -num_tokenize = max(1, - multiprocessing.cpu_count() - 2) # parse + tokenize + save -max_queue_size = 8 -parse_queue = Queue(maxsize=max_queue_size + num_tokenize) -tokenize_queue = Queue(maxsize=max_queue_size + num_tokenize) - - -def create_dict(data): - """ - Create dictionary based on data, and saved in data_dir/dict.txt. - The first line is unk \t -1. - data: list, input data by batch. - """ - for seq in data: - try: - for w in seq.lower().split(): - if w not in word_count: - word_count[w] = 1 - else: - word_count[w] += 1 - except: - sys.stderr.write(seq + "\tERROR\n") - - -def parse(path): - """ - Open .gz file. - """ - sys.stderr.write(path) - g = gzip.open(path, 'r') - for l in g: - yield json.loads(l) - g.close() - - -def tokenize(sentences): - """ - Use tokenizer.perl to tokenize input sentences. - tokenizer.perl is tool of Moses. - sentences : a list of input sentences. - return: a list of processed text. - """ - dir = './mosesdecoder-master/scripts/tokenizer/tokenizer.perl' - if not os.path.exists(dir): - sys.exit( - "The ./mosesdecoder-master/scripts/tokenizer/tokenizer.perl does not exists." 
- ) - tokenizer_cmd = [dir, '-l', 'en', '-q', '-'] - assert isinstance(sentences, list) - text = "\n".join(sentences) - tokenizer = Popen(tokenizer_cmd, stdin=PIPE, stdout=PIPE) - tok_text, _ = tokenizer.communicate(text) - toks = tok_text.split('\n')[:-1] - return toks - - -def save_data(instance, data_dir, pre_fix, batch_num): - """ - save data by batch - """ - label = ['1' if pre_fix == 'pos' else '0' for i in range(len(instance))] - lines = ['%s\t%s' % (label[i], instance[i]) for i in range(len(label))] - file_name = os.path.join(data_dir, "%s_%s.txt" % (pre_fix, batch_num)) - file(file_name, 'w').write('\n'.join(lines) + '\n') - - -def tokenize_batch(id): - """ - tokenize data by batch - """ - while True: - num_batch, instance, pre_fix = parse_queue.get() - if num_batch == -1: ### parse_queue finished - tokenize_queue.put((-1, None, None)) - sys.stderr.write("Thread %s finish\n" % (id)) - break - tokenize_instance = tokenize(instance) - tokenize_queue.put((num_batch, tokenize_instance, pre_fix)) - sys.stderr.write('.') - - -def save_batch(data_dir, num_tokenize, data_dir_dict): - """ - save data by batch - build dict.txt - """ - token_count = 0 - while True: - num_batch, instance, pre_fix = tokenize_queue.get() - if num_batch == -1: - token_count += 1 - if token_count == num_tokenize: #### tokenize finished. - break - else: - continue - save_data(instance, data_dir, pre_fix, num_batch) - create_dict(instance) ## update dict - - sys.stderr.write("save file finish\n") - f = open(data_dir_dict, 'w') - f.write('%s\t%s\n' % ('unk', '-1')) - for k, v in sorted(word_count.items(), key=operator.itemgetter(1), \ - reverse=True): - f.write('%s\t%s\n' % (k, v)) - f.close() - sys.stderr.write("build dict finish\n") - - -def parse_batch(data, num_tokenize): - """ - parse data by batch - parse -> tokenize -> save - """ - raw_txt = parse(data) - neg, pos = [], [] - count = 0 - sys.stderr.write("extract raw data\n") - for l in raw_txt: - rating = l["overall"] - text = l["reviewText"].lower() # # convert words to lower case - if rating == 5.0 and text: - pos.append(text) - if rating < 3.0 and text: - neg.append(text) - if len(pos) == batch_size or len(neg) == batch_size: - if len(pos) == batch_size: - batch = pos - pre_fix = 'pos' - else: - batch = neg - pre_fix = 'neg' - - parse_queue.put((count, batch, pre_fix)) - count += 1 - if pre_fix == 'pos': - pos = [] - else: - neg = [] - - if len(pos) > 0: - parse_queue.put((count, pos, 'pos')) - count += 1 - if len(neg) > 0: - parse_queue.put((count, neg, 'neg')) - count += 1 - for i in range(num_tokenize): - parse_queue.put((-1, None, None)) #### for tokenize's input finished - sys.stderr.write("parsing finish\n") - - -def option_parser(): - parser = OptionParser(usage="usage: python preprcoess.py "\ - "-i data_path [options]") - parser.add_option( - "-i", "--data", action="store", dest="input", help="Input data path.") - parser.add_option( - "-s", - "--seed", - action="store", - dest="seed", - default=1024, - help="Set random seed.") - return parser.parse_args() - - -def main(): - reload(sys) - sys.setdefaultencoding('utf-8') - options, args = option_parser() - data = options.input - seed = options.seed - data_dir_dict = os.path.join(os.path.dirname(data), 'dict.txt') - data_dir = os.path.join(os.path.dirname(data), 'tmp') - pool = Pool(processes=num_tokenize + 2) - pool.apply_async(parse_batch, args=(data, num_tokenize)) - for i in range(num_tokenize): - pool.apply_async(tokenize_batch, args=(str(i), )) - pool.apply_async(save_batch, args=(data_dir, 
num_tokenize, data_dir_dict)) - pool.close() - pool.join() - - file(os.path.join(os.path.dirname(data), 'labels.list'), - 'w').write('neg\t0\npos\t1\n') - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/dataprovider_bow.py b/v1_api_demo/quick_start/dataprovider_bow.py deleted file mode 100644 index 2745495586449b5d1eb64ae570f73eb6b14dbdfe..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/dataprovider_bow.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * - -# id of the word not in dictionary -UNK_IDX = 0 - - -# initializer is called by the framework during initialization. -# It allows the user to describe the data types and setup the -# necessary data structure for later use. -# `settings` is an object. initializer need to properly fill settings.input_types. -# initializer can also store other data structures needed to be used at process(). -# In this example, dictionary is stored in settings. -# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py -def initializer(settings, dictionary, **kwargs): - # Put the word dictionary into settings - settings.word_dict = dictionary - - # setting.input_types specifies what the data types the data provider - # generates. - settings.input_types = { - # The first input is a sparse_binary_vector, - # which means each dimension of the vector is either 0 or 1. It is the - # bag-of-words (BOW) representation of the texts. - 'word': sparse_binary_vector(len(dictionary)), - # The second input is an integer. It represents the category id of the - # sample. 2 means there are two labels in the dataset. - # (1 for positive and 0 for negative) - 'label': integer_value(2) - } - - -# Delaring a data provider. It has an initializer 'data_initialzer'. -# It will cache the generated data of the first pass in memory, so that -# during later pass, no on-the-fly data generation will be needed. -# `setting` is the same object used by initializer() -# `file_name` is the name of a file listed train_list or test_list file given -# to define_py_data_sources2(). See trainer_config.lr.py. -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - # Open the input data file. - with open(file_name, 'r') as f: - # Read each line. - for line in f: - # Each line contains the label and text of the comment, separated by \t. - label, comment = line.strip().split('\t') - - # Split the words into a list. - words = comment.split() - - # convert the words into a list of ids by looking them up in word_dict. - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words] - - # Return the features for the current comment. The first is a list - # of ids representing a 0-1 binary sparse vector of the text, - # the second is the integer id of the label. 
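# A concrete, made-up walk-through of the yield below: given the input line
#     "1\tthis phone is great"
# and word_dict = {'this': 4, 'phone': 53, 'great': 12} (no entry for 'is'),
# word_vector becomes [4, 53, 0, 12] -- the 0 is UNK_IDX for 'is' -- and the
# provider yields {'word': [4, 53, 0, 12], 'label': 1}; sparse_binary_vector
# then treats those ids as the positions set to 1 in the bag-of-words vector.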
- yield {'word': word_vector, 'label': int(label)} - - -def predict_initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = {'word': sparse_binary_vector(len(dictionary))} - - -# Declaring a data provider for prediction. The difference with process -# is that label is not generated. -@provider(init_hook=predict_initializer, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - comment = line.strip().split() - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in comment] - yield {'word': word_vector} diff --git a/v1_api_demo/quick_start/dataprovider_emb.py b/v1_api_demo/quick_start/dataprovider_emb.py deleted file mode 100755 index ddfa3ce9b73555cb3b7f5a44314ca35b12d41ede..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/dataprovider_emb.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * - -UNK_IDX = 0 - - -def initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = { - # Define the type of the first input as sequence of integer. - # The value of the integers range from 0 to len(dictrionary)-1 - 'word': integer_value_sequence(len(dictionary)), - # Define the second input for label id - 'label': integer_value(2) - } - - -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - label, comment = line.strip().split('\t') - words = comment.split() - word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words] - yield {'word': word_slot, 'label': int(label)} - - -def predict_initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = {'word': integer_value_sequence(len(dictionary))} - - -@provider(init_hook=predict_initializer, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - comment = line.strip().split() - word_slot = [settings.word_dict.get(w, UNK_IDX) for w in comment] - yield {'word': word_slot} diff --git a/v1_api_demo/quick_start/predict.sh b/v1_api_demo/quick_start/predict.sh deleted file mode 100755 index e47c2dd01fb5c919203964e298018e6dc2bd366e..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/predict.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.lr.py -#cfg=trainer_config.emb.py -#cfg=trainer_config.cnn.py -#cfg=trainer_config.lstm.py -model="output/pass-00003" -paddle train \ - --config=$cfg \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. \ -2>&1 | tee 'predict.log' -paddle usage -l 'predict.log' -e $? -n "quick_start_predict_${cfg}" >/dev/null 2>&1 - -mv rank-00000 result.txt diff --git a/v1_api_demo/quick_start/train.sh b/v1_api_demo/quick_start/train.sh deleted file mode 100755 index 01697fed48054be8ad98a01d4cbb5029e6a1ead0..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/train.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.lr.py -#cfg=trainer_config.emb.py -#cfg=trainer_config.cnn.py -#cfg=trainer_config.lstm.py -#cfg=trainer_config.bidi-lstm.py -#cfg=trainer_config.db-lstm.py -#cfg=trainer_config.resnet-lstm.py -paddle train \ - --config=$cfg \ - --save_dir=./output \ - --trainer_count=4 \ - --log_period=100 \ - --num_passes=15 \ - --use_gpu=false \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - 2>&1 | tee 'train.log' -paddle usage -l "train.log" -e $? -n "quick_start_${cfg}" >/dev/null 2>&1 diff --git a/v1_api_demo/quick_start/trainer_config.bidi-lstm.py b/v1_api_demo/quick_start/trainer_config.bidi-lstm.py deleted file mode 100644 index ca1d1f8d099b5a3f5276c108855c5e890e7214fe..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.bidi-lstm.py +++ /dev/null @@ -1,61 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) - -bi_lstm = bidirectional_lstm(input=emb, size=128) -dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5) - -output = fc_layer( - input=dropout, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.cnn.py b/v1_api_demo/quick_start/trainer_config.cnn.py deleted file mode 100644 index f8c3d511f323ed9ec96be0a1951014c6db639003..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.cnn.py +++ /dev/null @@ -1,55 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -data = data_layer(name="word", size=len(word_dict)) -embedding = embedding_layer(input=data, size=128) -conv = sequence_conv_pool(input=embedding, context_len=3, hidden_size=512) -output = fc_layer(input=conv, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.db-lstm.py b/v1_api_demo/quick_start/trainer_config.db-lstm.py deleted file mode 100644 index fba802b4600b33cfbfd0820cce1f47e4d0f948ae..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.db-lstm.py +++ /dev/null @@ -1,74 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) 
- -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) - -hidden_0 = mixed_layer(size=128, input=[full_matrix_projection(input=emb)]) -lstm_0 = lstmemory(input=hidden_0, layer_attr=ExtraAttr(drop_rate=0.1)) - -input_layers = [hidden_0, lstm_0] - -for i in range(1, 8): - fc = fc_layer(input=input_layers, size=128) - lstm = lstmemory( - input=fc, - layer_attr=ExtraAttr(drop_rate=0.1), - reverse=(i % 2) == 1, ) - input_layers = [fc, lstm] - -lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) - -output = fc_layer( - input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.emb.py b/v1_api_demo/quick_start/trainer_config.emb.py deleted file mode 100644 index 7410397ef656e363b232787995d3a869cd11b655..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.emb.py +++ /dev/null @@ -1,51 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, learning_rate=2e-3, learning_method=AdamOptimizer()) - -data = data_layer(name="word", size=len(word_dict)) -embedding = embedding_layer(input=data, size=128) -avg = pooling_layer(input=embedding, pooling_type=AvgPooling()) -output = fc_layer(input=avg, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.lr.py b/v1_api_demo/quick_start/trainer_config.lr.py deleted file mode 100644 index e5105aa89532d71c80c8ec77ca98ac6a8e9c8c58..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.lr.py +++ /dev/null @@ -1,72 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -dict_file = get_config_arg('dict_file', str, "./data/dict.txt") -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' - -# define the data sources for the model. -# We need to use different process for training and prediction. -# For training, the input data includes both word IDs and labels. -# For prediction, the input data only includs word Ids. -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_bow", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -# Define the data for text features. The size of the data layer is the number -# of words in the dictionary. -data = data_layer(name="word", size=len(word_dict)) - -# Define a fully connected layer with logistic activation. -# (also called softmax activation). -output = fc_layer(input=data, size=2, act=SoftmaxActivation()) - -if not is_predict: - # For training, we need label and cost - - # define the category id for each example. - # The size of the data layer is the number of labels. - label = data_layer(name="label", size=2) - - # Define cross-entropy classification loss and error. - cls = classification_cost(input=output, label=label) - outputs(cls) -else: - # For prediction, no label is needed. We need to output - # We need to output classification result, and class probabilities. - maxid = maxid_layer(output) - outputs([maxid, output]) diff --git a/v1_api_demo/quick_start/trainer_config.lstm.py b/v1_api_demo/quick_start/trainer_config.lstm.py deleted file mode 100644 index 43b4ddac2dca5f6b9aa28f055e843abf12e92312..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.lstm.py +++ /dev/null @@ -1,57 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) -lstm = simple_lstm( - input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.25)) -lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_max, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.resnet-lstm.py b/v1_api_demo/quick_start/trainer_config.resnet-lstm.py deleted file mode 100644 index 89a837abb7cdeaaa249160123e1f2001d23d7aa1..0000000000000000000000000000000000000000 --- a/v1_api_demo/quick_start/trainer_config.resnet-lstm.py +++ /dev/null @@ -1,91 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This configuration is a demonstration of how to implement the stacked LSTM -with residual connections, i.e. an LSTM layer takes the sum of the hidden states -and inputs of the previous LSTM layer instead of only the hidden states. -This architecture is from: -Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, -Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, -Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, -Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, -George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, -Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, Jeffrey Dean. 2016. -Google's Neural Machine Translation System: Bridging the Gap between Human and -Machine Translation. In arXiv https://arxiv.org/pdf/1609.08144v2.pdf -Different from the architecture described in the paper, we use a stack single -direction LSTM layers as the first layer instead of bi-directional LSTM. Also, -since this is a demo code, to reduce computation time, we stacked 4 layers -instead of 8 layers. 
-""" - -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) - -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) -lstm = simple_lstm(input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1)) - -previous_input, previous_hidden_state = emb, lstm - -for i in range(3): - # The input to the current layer is the sum of the hidden state - # and input of the previous layer. - current_input = addto_layer(input=[previous_input, previous_hidden_state]) - hidden_state = simple_lstm( - input=current_input, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1)) - previous_input, previous_hidden_state = current_input, hidden_state - -lstm = previous_hidden_state - -lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer( - input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/sequence_tagging/data/get_data.sh b/v1_api_demo/sequence_tagging/data/get_data.sh deleted file mode 100755 index 0cdb394035e782b3a647f7f13e79d55b5d3dff48..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/data/get_data.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -wget http://www.cnts.ua.ac.be/conll2000/chunking/train.txt.gz -wget http://www.cnts.ua.ac.be/conll2000/chunking/test.txt.gz diff --git a/v1_api_demo/sequence_tagging/data/test.list b/v1_api_demo/sequence_tagging/data/test.list deleted file mode 100644 index 073c0a0c9063ac55f762ac261746aa73057d70e8..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/data/test.list +++ /dev/null @@ -1 +0,0 @@ -data/test.txt.gz diff --git a/v1_api_demo/sequence_tagging/data/train.list b/v1_api_demo/sequence_tagging/data/train.list deleted file mode 100644 index 43c24d5f6484a90fe883ad5516fe100d27c9ce47..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/data/train.list +++ /dev/null @@ -1 +0,0 @@ -data/train.txt.gz diff --git a/v1_api_demo/sequence_tagging/dataprovider.py b/v1_api_demo/sequence_tagging/dataprovider.py deleted file mode 100644 index bb4b4465bc7e032c50c1d21263651e2578af67be..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/dataprovider.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * -import gzip -import logging - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) -logger = logging.getLogger('paddle') -logger.setLevel(logging.INFO) - -OOV_POLICY_IGNORE = 0 -OOV_POLICY_USE = 1 -OOV_POLICY_ERROR = 2 - -num_original_columns = 3 - -# Feature combination patterns. -# [[-1,0], [0,0]] means previous token at column 0 and current token at -# column 0 are combined as one feature. -patterns = [ - [[-2, 0]], - [[-1, 0]], - [[0, 0]], - [[1, 0]], - [[2, 0]], - [[-1, 0], [0, 0]], - [[0, 0], [1, 0]], - [[-2, 1]], - [[-1, 1]], - [[0, 1]], - [[1, 1]], - [[2, 1]], - [[-2, 1], [-1, 1]], - [[-1, 1], [0, 1]], - [[0, 1], [1, 1]], - [[1, 1], [2, 1]], - [[-2, 1], [-1, 1], [0, 1]], - [[-1, 1], [0, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]], -] - -dict_label = { - 'B-ADJP': 0, - 'I-ADJP': 1, - 'B-ADVP': 2, - 'I-ADVP': 3, - 'B-CONJP': 4, - 'I-CONJP': 5, - 'B-INTJ': 6, - 'I-INTJ': 7, - 'B-LST': 8, - 'I-LST': 9, - 'B-NP': 10, - 'I-NP': 11, - 'B-PP': 12, - 'I-PP': 13, - 'B-PRT': 14, - 'I-PRT': 15, - 'B-SBAR': 16, - 'I-SBAR': 17, - 'B-UCP': 18, - 'I-UCP': 19, - 'B-VP': 20, - 'I-VP': 21, - 'O': 22 -} - - -def make_features(sequence): - length = len(sequence) - num_features = len(sequence[0]) - - def get_features(pos): - if pos < 0: - return ['#B%s' % -pos] * num_features - if pos >= length: - return ['#E%s' % (pos - length + 1)] * num_features - return sequence[pos] - - for i in xrange(length): - for pattern in patterns: - fname = '/'.join([get_features(i + pos)[f] for pos, f in pattern]) - sequence[i].append(fname) - - -''' -Source file format: -Each line is for one timestep. The features are separated by space. -An empty line indicates end of a sequence. - -cutoff: a list of numbers. 
If count of a feature is smaller than this, - it will be ignored. -if oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV features of -i-th column. - -return a list of dict for each column -''' - - -def create_dictionaries(filename, cutoff, oov_policy): - def add_to_dict(sequence, dicts): - num_features = len(dicts) - for features in sequence: - l = len(features) - assert l == num_features, "Wrong number of features " + line - for i in xrange(l): - if features[i] in dicts[i]: - dicts[i][features[i]] += 1 - else: - dicts[i][features[i]] = 1 - - num_features = len(cutoff) - dicts = [] - for i in xrange(num_features): - dicts.append(dict()) - - f = gzip.open(filename, 'rb') - - sequence = [] - - for line in f: - line = line.strip() - if not line: - make_features(sequence) - add_to_dict(sequence, dicts) - sequence = [] - continue - features = line.split(' ') - sequence.append(features) - - for i in xrange(num_features): - dct = dicts[i] - n = 1 if oov_policy[i] == OOV_POLICY_USE else 0 - todo = [] - for k, v in dct.iteritems(): - if v < cutoff[i]: - todo.append(k) - else: - dct[k] = n - n += 1 - - if oov_policy[i] == OOV_POLICY_USE: - # placeholder so that len(dct) will be the number of features - # including OOV - dct['#OOV#'] = 0 - - logger.info('column %d dict size=%d, ignored %d' % (i, n, len(todo))) - for k in todo: - del dct[k] - - f.close() - return dicts - - -def initializer(settings, **xargs): - cutoff = [3, 1, 0] - cutoff += [3] * len(patterns) - oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR] - oov_policy += [OOV_POLICY_IGNORE] * len(patterns) - dicts = create_dictionaries('data/train.txt.gz', cutoff, oov_policy) - dicts[2] = dict_label - settings.dicts = dicts - settings.oov_policy = oov_policy - input_types = [] - num_features = len(dicts) - for i in xrange(num_original_columns): - input_types.append(integer_sequence(len(dicts[i]))) - logger.info("slot %s size=%s" % (i, len(dicts[i]))) - if patterns: - dim = 0 - for i in xrange(num_original_columns, num_features): - dim += len(dicts[i]) - input_types.append(sparse_binary_vector_sequence(dim)) - logger.info("feature size=%s" % dim) - settings.input_types = input_types - - -''' -if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not -existed in dicts[i] will be assigned to id 0. -if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist -in dicts[i]. 
-''' - - -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, filename): - input_file = filename - dicts = settings.dicts - oov_policy = settings.oov_policy - - def gen_sample(sequence): - num_features = len(dicts) - sample = [list() for i in xrange(num_original_columns)] - if patterns: - sample.append([]) - for features in sequence: - assert len(features) == num_features, \ - "Wrong number of features: " + line - for i in xrange(num_original_columns): - id = dicts[i].get(features[i], -1) - if id != -1: - sample[i].append(id) - elif oov_policy[i] == OOV_POLICY_IGNORE: - sample[i].append(0xffffffff) - elif oov_policy[i] == OOV_POLICY_ERROR: - logger.fatal("Unknown token: %s" % features[i]) - else: - sample[i].append(0) - - if patterns: - dim = 0 - vec = [] - for i in xrange(num_original_columns, num_features): - id = dicts[i].get(features[i], -1) - if id != -1: - vec.append(dim + id) - elif oov_policy[i] == OOV_POLICY_IGNORE: - pass - elif oov_policy[i] == OOV_POLICY_ERROR: - logger.fatal("Unknown token: %s" % features[i]) - else: - vec.ids.append(dim + 0) - - dim += len(dicts[i]) - sample[-1].append(vec) - return sample - - num_features = len(dicts) - f = gzip.open(input_file, 'rb') - - num_sequences = 0 - sequence = [] - for line in f: - line = line.strip() - if not line: - make_features(sequence) - yield gen_sample(sequence) - sequence = [] - num_sequences += 1 - continue - features = line.split(' ') - sequence.append(features) - - f.close() - - logger.info("num_sequences=%s" % num_sequences) diff --git a/v1_api_demo/sequence_tagging/linear_crf.py b/v1_api_demo/sequence_tagging/linear_crf.py deleted file mode 100644 index ea012ba1ae9c790ccefd3dd5f066aa92202128a2..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/linear_crf.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -import math - -define_py_data_sources2( - train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") - -batch_size = 1 -settings( - learning_method=MomentumOptimizer(), - batch_size=batch_size, - regularization=L2Regularization(batch_size * 1e-4), - model_average=ModelAverage(0.5), - learning_rate=1e-1, - learning_rate_decay_a=1e-5, - learning_rate_decay_b=0.25, ) - -num_label_types = 23 - - -def get_simd_size(size): - return int(math.ceil(float(size) / 8)) * 8 - - -# Currently, in order to use sparse_update=True, -# the size has to be aligned. 
-num_label_types = get_simd_size(num_label_types) - -features = data_layer(name="features", size=76328) -word = data_layer(name="word", size=6778) -pos = data_layer(name="pos", size=44) -chunk = data_layer(name="chunk", size=num_label_types) - -crf_input = fc_layer( - input=features, - size=num_label_types, - act=LinearActivation(), - bias_attr=False, - param_attr=ParamAttr( - initial_std=0, sparse_update=True)) - -crf = crf_layer( - input=crf_input, - label=chunk, - param_attr=ParamAttr( - name="crfw", initial_std=0), ) - -crf_decoding = crf_decoding_layer( - size=num_label_types, - input=crf_input, - label=chunk, - param_attr=ParamAttr(name="crfw"), ) - -sum_evaluator( - name="error", - input=crf_decoding, ) - -chunk_evaluator( - name="chunk_f1", - input=crf_decoding, - label=chunk, - chunk_scheme="IOB", - num_chunk_types=11, ) - -inputs(word, pos, chunk, features) -outputs(crf) diff --git a/v1_api_demo/sequence_tagging/readme.md b/v1_api_demo/sequence_tagging/readme.md deleted file mode 100644 index 2e17fffb83c532f5e5fec1227f169c97c1f20e22..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/readme.md +++ /dev/null @@ -1,45 +0,0 @@ -# Sequence Tagging - -This demo is a sequence model for assigning tags to each token in a sentence. The task is described at CONLL2000 Text Chunking task. - -## Download data -```bash -cd demo/sequence_tagging -./data/get_data.sh -``` - -## Train model -```bash -cd demo/sequence_tagging -./train.sh -``` - -## Model description - -We provide two models. One is a linear CRF model (linear_crf.py) with is equivalent to the one at leon.bottou.org/projects/sgd. The second one is a stacked bidirectional RNN and CRF model (rnn_crf.py). -
-| Model name | Number of parameters | F1 score |
-| ---------- | -------------------- | -------- |
-| linear_crf | 1.8M                 | 0.937    |
-| rnn_crf    | 960K                 | 0.941    |
diff --git a/v1_api_demo/sequence_tagging/rnn_crf.py b/v1_api_demo/sequence_tagging/rnn_crf.py deleted file mode 100644 index 937a34df103663ecf0f0827bbfb9d82823c9b902..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/rnn_crf.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -import math - -define_py_data_sources2( - train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") - -batch_size = 16 -settings( - learning_method=MomentumOptimizer(), - batch_size=batch_size, - regularization=L2Regularization(batch_size * 1e-5), - model_average=ModelAverage(0.5), - learning_rate=2e-3, - learning_rate_decay_a=5e-7, - learning_rate_decay_b=0.5, ) - -word_dim = 128 -hidden_dim = 128 -with_rnn = True - -initial_std = 1 / math.sqrt(hidden_dim) -param_attr = ParamAttr(initial_std=initial_std) -cpu_layer_attr = ExtraLayerAttribute(device=-1) - -default_device(0) - -num_label_types = 23 - -features = data_layer(name="features", size=76328) -word = data_layer(name="word", size=6778) -pos = data_layer(name="pos", size=44) -chunk = data_layer( - name="chunk", size=num_label_types, layer_attr=cpu_layer_attr) - -emb = embedding_layer( - input=word, size=word_dim, param_attr=ParamAttr(initial_std=0)) - -hidden1 = mixed_layer( - size=hidden_dim, - act=STanhActivation(), - bias_attr=True, - input=[ - full_matrix_projection(emb), table_projection( - pos, param_attr=param_attr) - ]) - -if with_rnn: - rnn1 = recurrent_layer( - act=ReluActivation(), - bias_attr=True, - input=hidden1, - param_attr=ParamAttr(initial_std=0), ) - -hidden2 = mixed_layer( - size=hidden_dim, - act=STanhActivation(), - bias_attr=True, - input=[full_matrix_projection(hidden1)] + - ([full_matrix_projection( - rnn1, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) - -if with_rnn: - rnn2 = recurrent_layer( - reverse=True, - act=ReluActivation(), - bias_attr=True, - input=hidden2, - param_attr=ParamAttr(initial_std=0), ) - -crf_input = mixed_layer( - size=num_label_types, - bias_attr=False, - input=[full_matrix_projection(hidden2), ] + - ([full_matrix_projection( - rnn2, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) - -crf = crf_layer( - input=crf_input, - label=chunk, - param_attr=ParamAttr( - name="crfw", initial_std=0), - layer_attr=cpu_layer_attr, ) - -crf_decoding = crf_decoding_layer( - size=num_label_types, - input=crf_input, - label=chunk, - param_attr=ParamAttr(name="crfw"), - layer_attr=cpu_layer_attr, ) - -sum_evaluator( - name="error", - input=crf_decoding, ) - -chunk_evaluator( - name="chunk_f1", - input=crf_decoding, - label=chunk, - chunk_scheme="IOB", - num_chunk_types=11, ) - -inputs(word, pos, chunk, features) -outputs(crf) diff --git a/v1_api_demo/sequence_tagging/train.sh b/v1_api_demo/sequence_tagging/train.sh deleted file mode 100755 index 
37e196c84200dc26ccb523076a81dbc393b1280f..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/train.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -paddle train \ - --config rnn_crf.py \ - --parallel_nn=1 \ - --use_gpu=1 \ - --dot_period=10 \ - --log_period=1000 \ - --test_period=0 \ - --num_passes=10 \ -2>&1 | tee 'train.log' -paddle usage -l 'train.log' -e $? -n "sequence_tagging_train" >/dev/null 2>&1 diff --git a/v1_api_demo/sequence_tagging/train_linear.sh b/v1_api_demo/sequence_tagging/train_linear.sh deleted file mode 100755 index ad6e2d8ee7f813c69f9dd250c6f7bbb4403a0ed5..0000000000000000000000000000000000000000 --- a/v1_api_demo/sequence_tagging/train_linear.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -paddle train \ - --config linear_crf.py \ - --use_gpu=0 \ - --dot_period=100 \ - --log_period=10000 \ - --test_period=0 \ - --num_passes=10 -2>&1 | tee 'train_linear.log' -paddle usage -l 'train_linear.log' -e $? -n "sequence_tagging_train_linear" >/dev/null 2>&1 diff --git a/v1_api_demo/traffic_prediction/README b/v1_api_demo/traffic_prediction/README deleted file mode 100644 index 4c95188583513c332b7d7cb0a32d59336208e1aa..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/README +++ /dev/null @@ -1,7 +0,0 @@ -run by: -cd ./data -sh get_data.sh -cd .. -sh train.sh -sh predict.sh - diff --git a/v1_api_demo/traffic_prediction/data/get_data.sh b/v1_api_demo/traffic_prediction/data/get_data.sh deleted file mode 100755 index f2fa548d4709c0361334f117bfb49e18d83c32f4..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/data/get_data.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -set -x - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -#download the dataset -echo "Downloading traffic data..." -wget http://paddlepaddle.cdn.bcebos.com/demo/traffic/traffic_data.tar.gz - -#extract package -echo "Unzipping..." -tar -zxvf traffic_data.tar.gz - -echo "data/speeds.csv" > train.list -echo "data/speeds.csv" > test.list -echo "data/speeds.csv" > pred.list - -echo "Done." diff --git a/v1_api_demo/traffic_prediction/dataprovider.py b/v1_api_demo/traffic_prediction/dataprovider.py deleted file mode 100644 index c7883b6950c369ee67c39b80ce1cefbbf9350459..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/dataprovider.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * -import sys -import numpy as np -TERM_NUM = 24 -FORECASTING_NUM = 24 -LABEL_VALUE_NUM = 4 - - -def initHook(settings, file_list, **kwargs): - """ - Init hook is invoked before process data. It will set obj.slots and store data meta. - - :param settings: global object. It will passed to process routine. - :type obj: object - :param file_list: the meta file object, which passed from trainer_config.py,but unused in this function. - :param kwargs: unused other arguments. - """ - del kwargs #unused - - settings.pool_size = sys.maxint - #Use a time seires of the past as feature. - #Dense_vector's expression form is [float,float,...,float] - settings.input_types = [dense_vector(TERM_NUM)] - #There are next FORECASTING_NUM fragments you need predict. - #Every predicted condition at time point has four states. - for i in range(FORECASTING_NUM): - settings.input_types.append(integer_value(LABEL_VALUE_NUM)) - - -@provider( - init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True) -def process(settings, file_name): - with open(file_name) as f: - #abandon fields name - f.next() - for row_num, line in enumerate(f): - speeds = map(int, line.rstrip('\r\n').split(",")[1:]) - # Get the max index. - end_time = len(speeds) - # Scanning and generating samples - for i in range(TERM_NUM, end_time - FORECASTING_NUM): - # For dense slot - pre_spd = map(float, speeds[i - TERM_NUM:i]) - - # Integer value need predicting, values start from 0, so every one minus 1. - fol_spd = [j - 1 for j in speeds[i:i + FORECASTING_NUM]] - - # Predicting label is missing, abandon the sample. - if -1 in fol_spd: - continue - yield [pre_spd] + fol_spd - - -def predict_initHook(settings, file_list, **kwargs): - settings.pool_size = sys.maxint - settings.input_types = [dense_vector(TERM_NUM)] - - -@provider(init_hook=predict_initHook, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name) as f: - #abandon fields name - f.next() - for row_num, line in enumerate(f): - speeds = map(int, line.rstrip('\r\n').split(",")) - end_time = len(speeds) - pre_spd = map(float, speeds[end_time - TERM_NUM:end_time]) - yield pre_spd diff --git a/v1_api_demo/traffic_prediction/gen_result.py b/v1_api_demo/traffic_prediction/gen_result.py deleted file mode 100644 index 3da70b30315f863fd3582583e9a29540a09c1e7f..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/gen_result.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -res = [] -with open('./rank-00000') as f: - for line in f: - pred = map(int, line.strip('\r\n;').split(";")) - #raw prediction range from 0 to 3 - res.append([i + 1 for i in pred]) - -file_name = open('./data/pred.list').read().strip('\r\n') - -FORECASTING_NUM = 24 -header = [ - 'id', - '201604200805', - '201604200810', - '201604200815', - '201604200820', - '201604200825', - '201604200830', - '201604200835', - '201604200840', - '201604200845', - '201604200850', - '201604200855', - '201604200900', - '201604200905', - '201604200910', - '201604200915', - '201604200920', - '201604200925', - '201604200930', - '201604200935', - '201604200940', - '201604200945', - '201604200950', - '201604200955', - '201604201000', -] -################### -## To CSV format ## -################### -with open(file_name) as f: - f.next() - print ','.join(header) - for row_num, line in enumerate(f): - fields = line.rstrip('\r\n').split(',') - linkid = fields[0] - print linkid + ',' + ','.join(map(str, res[row_num])) diff --git a/v1_api_demo/traffic_prediction/predict.sh b/v1_api_demo/traffic_prediction/predict.sh deleted file mode 100755 index 2dbd5e8805dd97d35c7d58917f8ec6b5033bda03..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/predict.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.py -# pass choice -model="output/pass-00000" -paddle train \ - --config=$cfg \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. - -python gen_result.py > result.csv - -rm -rf rank-00000 diff --git a/v1_api_demo/traffic_prediction/train.sh b/v1_api_demo/traffic_prediction/train.sh deleted file mode 100755 index 48dfc5604f80042598c5c779bd450a5808fdfb64..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/train.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e - -cfg=trainer_config.py -paddle train \ - --config=$cfg \ - --save_dir=./output \ - --trainer_count=4 \ - --log_period=1000 \ - --dot_period=10 \ - --num_passes=10 \ - --use_gpu=false \ - --show_parameter_stats_period=3000 \ - 2>&1 | tee 'train.log' diff --git a/v1_api_demo/traffic_prediction/trainer_config.py b/v1_api_demo/traffic_prediction/trainer_config.py deleted file mode 100755 index 52d678624aff7ca2264c3c20e320004217d14397..0000000000000000000000000000000000000000 --- a/v1_api_demo/traffic_prediction/trainer_config.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddle.trainer_config_helpers import * - -################################### DATA Configuration ############################################# -is_predict = get_config_arg('is_predict', bool, False) -trn = './data/train.list' if not is_predict else None -tst = './data/test.list' if not is_predict else './data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, test_list=tst, module="dataprovider", obj=process) -################################### Parameter Configuaration ####################################### -TERM_NUM = 24 -FORECASTING_NUM = 24 -emb_size = 16 -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=1e-3, - learning_method=RMSPropOptimizer()) -################################### Algorithm Configuration ######################################## - -output_label = [] - -link_encode = data_layer(name='link_encode', size=TERM_NUM) -for i in xrange(FORECASTING_NUM): - # Each task share same weight. - link_param = ParamAttr( - name='_link_vec.w', initial_max=1.0, initial_min=-1.0) - link_vec = fc_layer(input=link_encode, size=emb_size, param_attr=link_param) - score = fc_layer(input=link_vec, size=4, act=SoftmaxActivation()) - if is_predict: - maxid = maxid_layer(score) - output_label.append(maxid) - else: - # Multi-task training. - label = data_layer(name='label_%dmin' % ((i + 1) * 5), size=4) - cls = classification_cost( - input=score, name="cost_%dmin" % ((i + 1) * 5), label=label) - output_label.append(cls) -outputs(output_label) diff --git a/v1_api_demo/vae/README.md b/v1_api_demo/vae/README.md deleted file mode 100644 index e55d483b023773900729622a6cac44116fc79c76..0000000000000000000000000000000000000000 --- a/v1_api_demo/vae/README.md +++ /dev/null @@ -1,13 +0,0 @@ -#Variational Autoencoder (VAE) - -This demo implements VAE training described in the original paper (https://arxiv.org/abs/1312.6114). - - -In order to run the model, first download the MNIST dataset by running the shell script in ./data. - -Then you can run the command below. The flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). 
- -$python vae_train.py [--use_gpu 1] - -The generated images will be stored in ./samples/ -The corresponding models will be stored in ./params/ diff --git a/v1_api_demo/vae/data/get_mnist_data.sh b/v1_api_demo/vae/data/get_mnist_data.sh deleted file mode 100755 index a77c81bf5af9ddb6634ff89460797ca543c5e517..0000000000000000000000000000000000000000 --- a/v1_api_demo/vae/data/get_mnist_data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env sh -# This script downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/mnist_data" -mkdir "$DIR/mnist_data" -cd "$DIR/mnist_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done diff --git a/v1_api_demo/vae/dataloader.py b/v1_api_demo/vae/dataloader.py deleted file mode 100644 index e9ff95d44f825cd941b5687f754618e66d491e7f..0000000000000000000000000000000000000000 --- a/v1_api_demo/vae/dataloader.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np - - -class MNISTloader(): - def __init__(self, - data_path="./data/mnist_data/", - batch_size=60, - process='train'): - self.batch_size = batch_size - self.data_path = data_path - self._pointer = 0 - self.image_batches = np.array([]) - self.process = process - - def _extract_images(self, filename, n): - f = open(filename, 'rb') - f.read(16) - data = np.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)) - #Mapping data into [-1, 1] - data = data / 255. * 2. - 1 - data_batches = np.split(data, 60000 / self.batch_size, 0) - - f.close() - - return data_batches - - @property - def pointer(self): - return self._pointer - - def load_data(self): - TRAIN_IMAGES = '%s/train-images-idx3-ubyte' % self.data_path - TEST_IMAGES = '%s/t10k-images-idx3-ubyte' % self.data_path - - if self.process == 'train': - self.image_batches = self._extract_images(TRAIN_IMAGES, 60000) - else: - self.image_batches = self._extract_images(TEST_IMAGES, 10000) - - def next_batch(self): - batch = self.image_batches[self._pointer] - self._pointer = (self._pointer + 1) % (60000 / self.batch_size) - return np.array(batch) - - def reset_pointer(self): - self._pointer = 0 diff --git a/v1_api_demo/vae/vae_conf.py b/v1_api_demo/vae/vae_conf.py deleted file mode 100644 index 301dd23793d19ec5946cc7bb07e32c53c04a972b..0000000000000000000000000000000000000000 --- a/v1_api_demo/vae/vae_conf.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * -import numpy as np - -is_generating = get_config_arg("is_generating", bool, False) - -settings(batch_size=32, learning_rate=1e-3, learning_method=AdamOptimizer()) - -X_dim = 28 * 28 -h_dim = 128 -z_dim = 100 - - -def reparameterization(mu, logvar): - eps = ParamAttr(initial_mean=0., initial_std=1) - with mixed_layer() as sigma: - sigma += dotmul_projection(layer_math.exp(logvar) * 0.5, param_attr=eps) - return mu + sigma - - -def q_func(X): - """ - xavier initialization - """ - param_attr = ParamAttr( - name='share.w', initial_mean=0., initial_std=1. / np.sqrt(X_dim / 2.)) - mu_param = ParamAttr( - name='mu.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - logvar_param = ParamAttr( - name='logvar.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - - bias_attr = ParamAttr(name='share.bias', initial_mean=0., initial_std=0.) - mu_bias = ParamAttr(name='mu.bias', initial_mean=0., initial_std=0.) - logvar_bias = ParamAttr(name='logvar.bias', initial_mean=0., initial_std=0.) - - share_layer = fc_layer( - X, - size=h_dim, - param_attr=param_attr, - bias_attr=bias_attr, - act=ReluActivation()) - - return (fc_layer( - share_layer, - size=z_dim, - param_attr=mu_param, - bias_attr=mu_bias, - act=LinearActivation()), fc_layer( - share_layer, - size=z_dim, - param_attr=logvar_param, - bias_attr=logvar_bias, - act=LinearActivation())) - - -def generator(z): - - hidden_param = ParamAttr( - name='hidden.w', initial_mean=0., initial_std=1. / np.sqrt(z_dim / 2.)) - hidden_bias = ParamAttr(name='hidden.bias', initial_mean=0., initial_std=0.) - prob_param = ParamAttr( - name='prob.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - prob_bias = ParamAttr(name='prob.bias', initial_mean=0., initial_std=0.) - - hidden_layer = fc_layer( - z, - size=h_dim, - act=ReluActivation(), - param_attr=hidden_param, - bias_attr=hidden_bias) - prob = fc_layer( - hidden_layer, - size=X_dim, - act=SigmoidActivation(), - param_attr=prob_param, - bias_attr=prob_bias) - - return prob - - -def reconstruct_error(prob, X): - cost = multi_binary_label_cross_entropy(input=prob, label=X) - return cost - - -def KL_loss(mu, logvar): - with mixed_layer() as mu_square: - mu_square += dotmul_operator(mu, mu, scale=1.) - - cost = 0.5 * sum_cost(layer_math.exp(logvar) + mu_square - 1. - logvar) - - return cost - - -if not is_generating: - x_batch = data_layer(name='x_batch', size=X_dim) - mu, logvar = q_func(x_batch) - z_samples = reparameterization(mu, logvar) - prob = generator(z_samples) - outputs(reconstruct_error(prob, x_batch) + KL_loss(mu, logvar)) -else: - z_samples = data_layer(name='noise', size=z_dim) - outputs(generator(z_samples)) diff --git a/v1_api_demo/vae/vae_train.py b/v1_api_demo/vae/vae_train.py deleted file mode 100644 index 1babb011c77b92861cc680a2e1aaa8c9ae5d97b5..0000000000000000000000000000000000000000 --- a/v1_api_demo/vae/vae_train.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import random -import numpy as np -import cPickle -import sys, os -from PIL import Image - -from paddle.trainer.config_parser import parse_config -from paddle.trainer.config_parser import logger -import py_paddle.swig_paddle as api -import dataloader -import matplotlib.pyplot as plt - - -def plot_samples(samples): - fig = plt.figure(figsize=(4, 4)) - gs = gridspec.GridSpec(4, 4) - gs.update(wspace=0.05, hspace=0.05) - for i, sample in enumerate(samples): - plt.subplot(gs[i]) - plt.axis('off') - plt.imshow(sample.reshape(28, 28), cmap='Greys_r') - - return fig - - -def CHECK_EQ(a, b): - assert a == b, "a=%s, b=%s" % (a, b) - - -def get_fake_samples(generator_machine, batch_size, noise): - gen_inputs = api.Arguments.createArguments(1) - gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - gen_outputs = api.Arguments.createArguments(0) - generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST) - fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat() - return fake_samples - - -def copy_shared_parameters(src, dst): - ''' - copy the parameters from src to dst - :param src: the source of the parameters - :type src: GradientMachine - :param dst: the destination of the parameters - :type dst: GradientMachine - ''' - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - src_params = dict([(p.getName(), p) for p in src_params]) - - for i in xrange(dst.getParameterSize()): - dst_param = dst.getParameter(i) - src_param = src_params.get(dst_param.getName(), None) - if src_param is None: - continue - src_value = src_param.getBuf(api.PARAMETER_VALUE) - dst_value = dst_param.getBuf(api.PARAMETER_VALUE) - CHECK_EQ(len(src_value), len(dst_value)) - dst_value.copyFrom(src_value) - dst_param.setValueUpdated() - - -def find(iterable, cond): - for item in iterable: - if cond(item): - return item - return None - - -def get_layer_size(model_conf, layer_name): - layer_conf = find(model_conf.layers, lambda x: x.name == layer_name) - assert layer_conf is not None, "Cannot find '%s' layer" % layer_name - return layer_conf.size - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--use_gpu", default="1", help="1 means use gpu for training") - parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter") - args = parser.parse_args() - use_gpu = args.use_gpu - assert use_gpu in ["0", "1"] - - if not os.path.exists("./samples/"): - os.makedirs("./samples/") - - if not os.path.exists("./params/"): - os.makedirs("./params/") - - api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', - '--log_period=1000', '--gpu_id=' + args.gpu_id, - '--save_dir=' + "./params/") - - conf = "vae_conf.py" - - trainer_conf = parse_config(conf, "is_generating=False") - gener_conf = parse_config(conf, "is_generating=True") - - batch_size = trainer_conf.opt_config.batch_size - - noise_dim = get_layer_size(gener_conf.model_config, "noise") - - mnist = 
dataloader.MNISTloader(batch_size=batch_size) - mnist.load_data() - - training_machine = api.GradientMachine.createFromConfigProto( - trainer_conf.model_config) - - generator_machine = api.GradientMachine.createFromConfigProto( - gener_conf.model_config) - - trainer = api.Trainer.create(trainer_conf, training_machine) - - trainer.startTrain() - - for train_pass in xrange(100): - trainer.startTrainPass() - mnist.reset_pointer() - i = 0 - it = 0 - while mnist.pointer != 0 or i == 0: - X = mnist.next_batch().astype('float32') - - inputs = api.Arguments.createArguments(1) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(X)) - - trainer.trainOneDataBatch(batch_size, inputs) - - if it % 1000 == 0: - - outputs = api.Arguments.createArguments(0) - training_machine.forward(inputs, outputs, api.PASS_TEST) - loss = np.mean(outputs.getSlotValue(0).copyToNumpyMat()) - print "\niter: {}".format(str(it).zfill(3)) - print "VAE loss: {}".format(str(loss).zfill(3)) - - #Sync parameters between networks (GradientMachine) at the beginning - copy_shared_parameters(training_machine, generator_machine) - - z_samples = np.random.randn(batch_size, - noise_dim).astype('float32') - samples = get_fake_samples(generator_machine, batch_size, - z_samples) - - #Generating the first 16 images for a picture. - figure = plot_samples(samples[:16]) - plt.savefig( - "./samples/{}_{}.png".format( - str(train_pass).zfill(3), str(i).zfill(3)), - bbox_inches='tight') - plt.close(figure) - i += 1 - it += 1 - - trainer.finishTrainPass() - trainer.finishTrain() - - -if __name__ == '__main__': - main()
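For reference, the vae_conf.py removed above builds the training objective from two pieces: the reparameterization trick, z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, 1), and the closed-form Gaussian KL term 0.5 * sum(exp(logvar) + mu^2 - 1 - logvar), added to a per-pixel binary cross-entropy reconstruction cost. The following is only a minimal NumPy sketch of those computations; the variable names (mu, logvar, x, x_recon) are illustrative and do not come from the removed files.

    # Sketch of the VAE loss terms implemented by the deleted vae_conf.py.
    # Names and shapes (batch_size x z_dim, batch_size x X_dim) are assumptions.
    import numpy as np

    def reparameterize(mu, logvar, rng=np.random):
        # z = mu + sigma * eps, with sigma = exp(0.5 * logvar) and eps ~ N(0, 1)
        eps = rng.standard_normal(mu.shape)
        return mu + np.exp(0.5 * logvar) * eps

    def kl_loss(mu, logvar):
        # KL(N(mu, sigma^2) || N(0, 1)), summed over latent dims, averaged over the batch
        return 0.5 * np.mean(np.sum(np.exp(logvar) + mu**2 - 1.0 - logvar, axis=1))

    def reconstruction_loss(x, x_recon, eps=1e-8):
        # per-element binary cross-entropy between the input and the decoder output
        bce = -(x * np.log(x_recon + eps) + (1.0 - x) * np.log(1.0 - x_recon + eps))
        return np.mean(np.sum(bce, axis=1))

The total loss in the removed configuration corresponds to reconstruction_loss(x, x_recon) + kl_loss(mu, logvar); this sketch averages over the batch, whereas the deleted config sums costs, so the two agree only up to a constant scale.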